Pull request #4648 updated
Connecting to https://api.github.com using dgl-bot-personal-access-key220906
Connecting to https://api.github.com to check permissions of obtain list of yaox12 for dmlc/dgl
Obtained Jenkinsfile from 37ed78035f406940ed88a4b60cadd2b7cfde5fea+d78a3a4baf611b90871a849f58647160c7cd9ab4 (2a6b7c3d5b495238d0d9cc8640b20ee0facfdf2f)
[Pipeline] Start of Pipeline
[Pipeline] node
Running on dgl-manual-cpu-worker in /home/ubuntu/jenkins_new/workspace/dgl_PR-4648
[Pipeline] {
[Pipeline] stage
[Pipeline] { (Declarative: Checkout SCM)
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Fetching changes from the remote Git repository
Cleaning workspace
Fetching without tags
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Commit message: "fix for pytorch < 1.12"
> git rev-parse --resolve-git-dir /home/ubuntu/jenkins_new/workspace/dgl_PR-4648/.git # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.17.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git branch -D PR-4648 # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-list --no-walk 213b27ce5e5aec17b70af3c5ce56e3d7abfbda0f # timeout=10
Cleaning workspace
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
[Pipeline] }
[Pipeline] // stage
[Pipeline] withEnv
[Pipeline] {
[Pipeline] stage
[Pipeline] { (Authentication)
[Pipeline] node
Running on dgci-worker-permanent in /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648
[Pipeline] {
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Fetching changes from the remote Git repository
Cleaning workspace
Fetching without tags
> git rev-parse --resolve-git-dir /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648/.git # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.25.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
[Pipeline] withEnv
[Pipeline] {
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git branch -D PR-4648 # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
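Every checkout in this log follows the same recipe: fetch the PR head and master under pinned refspecs, check out the PR head commit, then merge the recorded master commit into it. A minimal sketch of reproducing that merge checkout by hand, assuming only the URL, refspecs, and commit IDs shown in the log (the clone directory name is illustrative):

    # Sketch: rebuild the PR-4648 merge checkout locally (not part of the log).
    git init dgl_PR-4648 && cd dgl_PR-4648
    git remote add origin https://github.com/dmlc/dgl.git
    # Fetch the PR head and master under the same refspecs the plugin uses.
    git fetch --no-tags origin \
        +refs/pull/4648/head:refs/remotes/origin/PR-4648 \
        +refs/heads/master:refs/remotes/origin/master
    # Check out the PR head, then merge the pinned master commit into it.
    git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea
    git merge d78a3a4baf611b90871a849f58647160c7cd9ab4
    git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea

Here the merge "produces" the PR head SHA itself, which means master was already an ancestor of the PR head and the merge was a no-op.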
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
+ docker pull dgllib/dgl-ci-lint
Using default tag: latest
latest: Pulling from dgllib/dgl-ci-lint
Digest: sha256:11b87b4425630143a2560fe6ad584f245ee0a86bc58b40cda3c2be93656b0d5e
Status: Image is up to date for dgllib/dgl-ci-lint:latest
docker.io/dgllib/dgl-ci-lint:latest
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
+ docker inspect -f . dgllib/dgl-ci-lint
.
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] withDockerContainer
dgci-worker-permanent does not seem to be running inside a container
$ docker run -t -d -u 0:0 -w /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648 -v /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648:/home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648:rw,z -v /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648@tmp:/home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-lint cat
$ docker top 415d53530fec5aed6c0040e81b3796a94de3e7f8cb1dcc4d15e5f1518126325d -eo pid,comm
[Pipeline] {
[Pipeline] script
[Pipeline] {
[Pipeline] }
[Pipeline] // script
[Pipeline] }
$ docker stop --time=1 415d53530fec5aed6c0040e81b3796a94de3e7f8cb1dcc4d15e5f1518126325d
$ docker rm -f 415d53530fec5aed6c0040e81b3796a94de3e7f8cb1dcc4d15e5f1518126325d
[Pipeline] // withDockerContainer
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // stage
[Pipeline] stage
[Pipeline] { (AuthenticationComment)
[Pipeline] node
Running on dgci-worker-permanent in /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648
[Pipeline] {
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Fetching changes from the remote Git repository
Cleaning workspace
Fetching without tags
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
[Pipeline] withEnv
[Pipeline] {
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
> git rev-parse --resolve-git-dir /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648/.git # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.25.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git branch -D PR-4648 # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
+ docker pull dgllib/dgl-ci-lint
Using default tag: latest
latest: Pulling from dgllib/dgl-ci-lint
Digest: sha256:11b87b4425630143a2560fe6ad584f245ee0a86bc58b40cda3c2be93656b0d5e
Status: Image is up to date for dgllib/dgl-ci-lint:latest
docker.io/dgllib/dgl-ci-lint:latest
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
+ docker inspect -f . dgllib/dgl-ci-lint
.
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] withDockerContainer
dgci-worker-permanent does not seem to be running inside a container
$ docker run -t -d -u 0:0 -w /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648 -v /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648:/home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648:rw,z -v /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648@tmp:/home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-lint cat
$ docker top b4f3029846f5165bf12c85fd7be505bbf78db783074ae1a07b14b774d1eb3855 -eo pid,comm
[Pipeline] {
[Pipeline] }
$ docker stop --time=1 b4f3029846f5165bf12c85fd7be505bbf78db783074ae1a07b14b774d1eb3855
$ docker rm -f b4f3029846f5165bf12c85fd7be505bbf78db783074ae1a07b14b774d1eb3855
[Pipeline] // withDockerContainer
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
Stage "AuthenticationComment" skipped due to when conditional
[Pipeline] }
[Pipeline] // stage
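The moving parts of withDockerContainer are all visible above: start a detached container that idles on cat, verify it with docker top, run the stage's steps inside it, then stop and remove it. A sketch of the same lifecycle, assuming the image and one workspace path from this log (the exec line stands in for the stage's real steps):

    # Sketch of the withDockerContainer lifecycle (not part of the log).
    WS=/home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648
    CID=$(docker run -t -d -u 0:0 -w "$WS" -v "$WS":"$WS":rw,z dgllib/dgl-ci-lint cat)
    docker top "$CID" -eo pid,comm      # confirm the idling `cat` process is alive
    docker exec "$CID" echo "stage steps run here via docker exec"
    docker stop --time=1 "$CID"         # 1-second grace period, as in the log
    docker rm -f "$CID"

The long run of masked -e ******** flags in the log is the job's environment being injected into the container with the values redacted.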
[Pipeline] stage
[Pipeline] { (Regression Test)
[Pipeline] node
Running on dgci-worker-permanent in /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648
[Pipeline] {
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Fetching changes from the remote Git repository
Cleaning workspace
Fetching without tags
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
> git rev-parse --resolve-git-dir /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648/.git # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.25.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git branch -D PR-4648 # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
[Pipeline] withEnv
[Pipeline] {
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
+ docker pull dgllib/dgl-ci-lint
Using default tag: latest
latest: Pulling from dgllib/dgl-ci-lint
Digest: sha256:11b87b4425630143a2560fe6ad584f245ee0a86bc58b40cda3c2be93656b0d5e
Status: Image is up to date for dgllib/dgl-ci-lint:latest
docker.io/dgllib/dgl-ci-lint:latest
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
+ docker inspect -f . dgllib/dgl-ci-lint
.
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] withDockerContainer
dgci-worker-permanent does not seem to be running inside a container
$ docker run -t -d -u 0:0 -w /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648 -v /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648:/home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648:rw,z -v /home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648@tmp:/home/ubuntu/jenkins_benchmark/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-lint cat
$ docker top 59fbbb555874ff8ca9167ae061ff4e42e3c3eeed473ccd147a34d4e5fe5fb8fe -eo pid,comm
[Pipeline] {
[Pipeline] }
$ docker stop --time=1 59fbbb555874ff8ca9167ae061ff4e42e3c3eeed473ccd147a34d4e5fe5fb8fe
$ docker rm -f 59fbbb555874ff8ca9167ae061ff4e42e3c3eeed473ccd147a34d4e5fe5fb8fe
[Pipeline] // withDockerContainer
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
Stage "Regression Test" skipped due to when conditional
[Pipeline] }
[Pipeline] // stage
[Pipeline] stage
[Pipeline] { (CI)
[Pipeline] stage
[Pipeline] { (Lint Check)
[Pipeline] node
Running on dgl-manual-large-cpu in /root/jenkins/workspace/dgl_PR-4648
[Pipeline] {
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Cloning the remote Git repository
Cloning with configured refspecs honoured and without tags
Cloning repository https://github.com/dmlc/dgl.git
> git init /root/jenkins/workspace/dgl_PR-4648 # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.17.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10
> git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10
Cleaning workspace
Fetching without tags
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
No valid HEAD. Skipping the resetting
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
[Pipeline] withEnv
[Pipeline] {
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
+ docker pull dgllib/dgl-ci-lint
Using default tag: latest
latest: Pulling from dgllib/dgl-ci-lint
Digest: sha256:11b87b4425630143a2560fe6ad584f245ee0a86bc58b40cda3c2be93656b0d5e
Status: Image is up to date for dgllib/dgl-ci-lint:latest
docker.io/dgllib/dgl-ci-lint:latest
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
+ docker inspect -f . dgllib/dgl-ci-lint
.
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] withDockerContainer
dgl-manual-large-cpu does not seem to be running inside a container
$ docker run -t -d -u 0:0 -w /root/jenkins/workspace/dgl_PR-4648 -v /root/jenkins/workspace/dgl_PR-4648:/root/jenkins/workspace/dgl_PR-4648:rw,z -v /root/jenkins/workspace/dgl_PR-4648@tmp:/root/jenkins/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-lint cat
$ docker top bb5e029d386aa285458fa71b74bfdb1eafdfce2f99026c9cffde6d1346714b02 -eo pid,comm
[Pipeline] {
[Pipeline] sh
+ rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@3d5a0fa4; decorates RemoteLauncher[hudson.remoting.Channel@1d9b9638:dgl-manual-large-cpu] will be ignored (a typical symptom is the Git executable not being run inside a designated container)
Fetching changes from the remote Git repository
Cleaning workspace
Fetching without tags
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
> git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648/.git # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.17.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git branch -D PR-4648 # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
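Every checkout above is bracketed by the same pristine-tree scrub, including the "No valid HEAD. Skipping the resetting" branch taken in fresh workspaces. A sketch of that idiom in isolation:

    # Sketch of the reset idiom the log repeats around every checkout.
    if git rev-parse --verify HEAD >/dev/null 2>&1; then  # only reset when a HEAD exists
        git reset --hard   # discard local modifications
    fi
    git clean -fdx         # remove untracked and ignored files, build output included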
[Pipeline] sh
+ git submodule update --recursive --init
Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS'
Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack'
Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core'
Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest'
Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm'
Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann'
Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl'
Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap'
Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe'
Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust'
Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm'
Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dlpack'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/googletest'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/libxsmm'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nanoflann'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nccl'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/phmap'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/xbyak'...
Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d'
Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'...
Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5'
Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f'
Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349'
Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026'
Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c'
Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181'
Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628'
Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f'
Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361'
Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest'
Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop'
Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv'
Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'...
Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e'
Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2'
Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242'
Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef'
Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'...
Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5'
Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890'
Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'...
Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95'
Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838'
Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack'
Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core'
Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang'
Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'...
Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642'
Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7'
Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762'
Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c'
Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020'
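All of the nested third-party checkouts above come from the single submodule command at the start of the step. For a fresh local clone, the equivalent sequence (sketch; URL taken from the log) is:

    # Sketch: reproduce the dependency tree locally.
    git clone https://github.com/dmlc/dgl.git && cd dgl
    git submodule update --init --recursive   # clones METIS/GKlib, tensorpipe/*, tvm/3rdparty/*, etc.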
[Pipeline] sh
+ bash tests/scripts/task_lint.sh
Checking code style of C++ codes...
=====113/113 cpp-header files passed check=====
=====176/176 cpp-source files passed check=====
All passed!
Checking code style of python codes...
Report
======
19102 statements analysed.

Statistics by type
------------------

+---------+-------+-----------+-----------+------------+---------+
|type     |number |old number |difference |%documented |%badname |
+=========+=======+===========+===========+============+=========+
|module   |229    |NC         |NC         |100.00      |0.00     |
+---------+-------+-----------+-----------+------------+---------+
|class    |343    |NC         |NC         |99.71       |1.46     |
+---------+-------+-----------+-----------+------------+---------+
|method   |1521   |NC         |NC         |99.61       |0.33     |
+---------+-------+-----------+-----------+------------+---------+
|function |663    |NC         |NC         |100.00      |1.06     |
+---------+-------+-----------+-----------+------------+---------+

External dependencies
---------------------
::

  dgl (dgl.distgnn.tools.tools)
    \-_api_internal (dgl._ffi.object)
    \-_dataloading
    | \-dataloader (dgl._dataloading.pytorch.dataloader)
    \-_deprecate
    | \-graph (dgl.transforms.functional)
    | \-nodeflow (dgl.network)
    | \-runtime
    |   \-ir (dgl.function.message)
    |   \-var (dgl.function.message)
    \-_ffi
    | \-_ctypes
    | | \-function (dgl._ffi.function)
    | | \-ndarray (dgl._ffi._ctypes.function)
    | | \-object (dgl._ffi._ctypes.function)
    | | \-types (dgl._ffi._ctypes.ndarray)
    | \-base (dgl._ffi._ctypes.ndarray)
    | \-function (dgl.transforms.functional)
    | \-libinfo (dgl._ffi.base)
    | \-ndarray (dgl.distributed.shared_mem_utils)
    | \-object (dgl.distributed.rpc)
    | \-object_generic (dgl._ffi._ctypes.function)
    | \-runtime_ctypes (dgl._ffi._ctypes.ndarray)
    | \-streams (dgl.heterograph_index)
    \-backend (dgl.transforms.functional)
    | \-pytorch (dgl.nn.pytorch.sparse_emb)
    \-base (dgl.transforms.functional)
    \-batch (dgl.transforms.functional)
    \-convert (dgl.transforms.functional)
    \-core (dgl.heterograph)
    \-data
    | \-utils (dgl.distgnn.tools.tools)
    \-dataloading
    | \-base (dgl.dataloading.dataloader)
    \-distributed (dgl._dataloading.pytorch.dataloader)
    | \-constants (dgl.distributed.rpc)
    | \-dist_context (dgl.distributed.dist_tensor)
    | \-dist_tensor (dgl.distributed.optim.pytorch.sparse_optim)
    | \-id_map (dgl.distributed.graph_partition_book)
    | \-kvstore (dgl.distributed.dist_tensor)
    | \-nn
    | | \-pytorch (dgl.distributed.optim.pytorch.sparse_optim)
    | \-optim
    | | \-pytorch
    | |   \-utils (dgl.distributed.optim.pytorch.sparse_optim)
    | \-role (dgl.distributed.dist_tensor)
    | \-rpc (dgl.distributed.dist_tensor)
    | \-shared_mem_utils (dgl.distributed.dist_graph)
    | \-standalone_kvstore (dgl.distributed.kvstore)
    \-frame (dgl.transforms.functional)
    \-function (dgl.transforms.functional)
    | \-base (dgl.function.message)
    \-geometry
    | \-capi (dgl.geometry.fps)
    \-graph_index (dgl.heterograph)
    \-heterograph (dgl.transforms.functional)
    \-heterograph_index (dgl.transforms.functional)
    \-init (dgl.frame)
    \-mock_sparse
    | \-diag_matrix (dgl.mock_sparse.elementwise_op_sp)
    | \-sp_matrix (dgl.mock_sparse.elementwise_op_sp)
    \-multiprocessing (dgl.dataloading.dataloader)
    \-ndarray (dgl.transforms.functional)
    \-nn
    | \-functional (dgl.nn.tensorflow.conv.gatconv)
    | \-mxnet
    | | \-utils (dgl.nn.mxnet.conv.relgraphconv)
    | \-pytorch (dgl.optim.pytorch.sparse_optim)
    | | \-conv
    | | | \-graphconv (dgl.nn.pytorch.conv.sgconv)
    | | \-linear (dgl.nn.pytorch.conv.hgtconv)
    | | \-softmax (dgl.nn.pytorch.conv.hgtconv)
    | | \-utils (dgl.nn.pytorch.conv.gatconv)
    | \-tensorflow
    |   \-utils (dgl.nn.tensorflow.conv.gatconv)
    \-ops (dgl.nn.pytorch.linear)
    \-partition (dgl.transforms.functional)
    \-random (dgl.distributed.partition)
    \-readout (dgl.nn.tensorflow.glob)
    \-sampling (dgl.distributed.graph_services)
    | \-neighbor (dgl.transforms.functional)
    | \-utils (dgl.sampling.neighbor)
    \-sparse (dgl.distgnn.partition.libra_partition)
    \-storages (dgl.dataloading.dataloader)
    | \-base (dgl.storages.numpy)
    \-subgraph (dgl.transforms.functional)
    \-transforms (dgl.distributed.dist_graph)
    | \-functional (dgl.transforms.module)
    \-traversal (dgl.propagate)
    \-udf (dgl.core)
    \-utils (dgl.transforms.functional)
    | \-checks (dgl.utils.data)
    \-view (dgl.heterograph)

Raw metrics
-----------

+----------+-------+------+---------+-----------+
|type      |number |%     |previous |difference |
+==========+=======+======+=========+===========+
|code      |22834  |34.48 |NC       |NC         |
+----------+-------+------+---------+-----------+
|docstring |36999  |55.88 |NC       |NC         |
+----------+-------+------+---------+-----------+
|comment   |1978   |2.99  |NC       |NC         |
+----------+-------+------+---------+-----------+
|empty     |4404   |6.65  |NC       |NC         |
+----------+-------+------+---------+-----------+

Duplication
-----------

+-------------------------+------+---------+-----------+
|                         |now   |previous |difference |
+=========================+======+=========+===========+
|nb duplicated lines      |0     |NC       |NC         |
+-------------------------+------+---------+-----------+
|percent duplicated lines |0.000 |NC       |NC         |
+-------------------------+------+---------+-----------+

Messages by category
--------------------

+-----------+-------+---------+-----------+
|type       |number |previous |difference |
+===========+=======+=========+===========+
|convention |0      |NC       |NC         |
+-----------+-------+---------+-----------+
|refactor   |0      |NC       |NC         |
+-----------+-------+---------+-----------+
|warning    |0      |NC       |NC         |
+-----------+-------+---------+-----------+
|error      |0      |NC       |NC         |
+-----------+-------+---------+-----------+

------------------------------------
Your code has been rated at 10.00/10
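The lint stage needs nothing beyond the public dgllib/dgl-ci-lint image and a checkout of the repository, so it can be reproduced outside Jenkins. A sketch, assuming the repo is checked out in the current directory (the /ws mount point is illustrative):

    # Sketch: run the same lint checks locally in the CI lint image.
    docker run --rm -v "$PWD":/ws -w /ws dgllib/dgl-ci-lint \
        bash tests/scripts/task_lint.sh   # C++ style checks plus the pylint report above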
Post stage
[Pipeline] cleanWs
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is disabled by the job configuration...
[WS-CLEANUP] done
[Pipeline] }
$ docker stop --time=1 bb5e029d386aa285458fa71b74bfdb1eafdfce2f99026c9cffde6d1346714b02
$ docker rm -f bb5e029d386aa285458fa71b74bfdb1eafdfce2f99026c9cffde6d1346714b02
[Pipeline] // withDockerContainer
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // stage
[Pipeline] stage
[Pipeline] { (Build)
[Pipeline] parallel
[Pipeline] { (Branch: CPU Build)
[Pipeline] { (Branch: GPU Build)
[Pipeline] { (Branch: PyTorch Cugraph GPU Build)
[Pipeline] { (Branch: CPU Build (Win64))
[Pipeline] stage
[Pipeline] { (CPU Build)
[Pipeline] stage
[Pipeline] { (GPU Build)
[Pipeline] stage
[Pipeline] { (PyTorch Cugraph GPU Build)
[Pipeline] stage
[Pipeline] { (CPU Build (Win64))
[Pipeline] node
Running on dglci-windows in C:\Jenkins\workspace\dgl_PR-4648
[Pipeline] node
[Pipeline] node
Running on dgl-manual-large-cpu in /root/jenkins/workspace/dgl_PR-4648
Running on dgl-manual-large-cpu in /root/jenkins/workspace/dgl_PR-4648@2
[Pipeline] node
Running on dgl-manual-large-cpu in /root/jenkins/workspace/dgl_PR-4648@3
[Pipeline] {
[Pipeline] {
[Pipeline] {
[Pipeline] {
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Cloning the remote Git repository
Cloning with configured refspecs honoured and without tags
[Pipeline] checkout
The recommended git tool is: git
[Pipeline] checkout
using credential 150de63f-189c-4717-bcaf-010460d2f51a
The recommended git tool is: git
Cloning the remote Git repository
Cloning with configured refspecs honoured and without tags
[Pipeline] checkout
using credential 150de63f-189c-4717-bcaf-010460d2f51a
The recommended git tool is: git
Cloning the remote Git repository
Cloning with configured refspecs honoured and without tags
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Cloning the remote Git repository
Cloning with configured refspecs honoured and without tags
Cloning repository https://github.com/dmlc/dgl.git
> git init C:\Jenkins\workspace\dgl_PR-4648 # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.20.0.windows.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
Cloning repository https://github.com/dmlc/dgl.git
> git init /root/jenkins/workspace/dgl_PR-4648 # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.17.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
Cloning repository https://github.com/dmlc/dgl.git
> git init /root/jenkins/workspace/dgl_PR-4648@2 # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.17.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
Cloning repository https://github.com/dmlc/dgl.git
> git init /root/jenkins/workspace/dgl_PR-4648@3 # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.17.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
Cleaning workspace
Fetching without tags
Cleaning workspace
Fetching without tags
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10
> git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
No valid HEAD. Skipping the resetting
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10
> git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
No valid HEAD. Skipping the resetting
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
Cleaning workspace
Fetching without tags
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10
> git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
Cleaning workspace
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
Fetching without tags
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10
> git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
No valid HEAD. Skipping the resetting
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
> git rev-parse --verify HEAD # timeout=10
No valid HEAD. Skipping the resetting
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse "HEAD^{commit}" # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
[Pipeline] withEnv
[Pipeline] {
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
[Pipeline] withEnv
[Pipeline] {
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
+ docker pull dgllib/dgl-ci-gpu:cu101_v220816
+ docker pull dgllib/dgl-ci-cpu:v220816
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
+ docker pull rapidsai/cugraph_nightly_torch-cuda:11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10
> git branch -a -v --no-abbrev # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
cu101_v220816: Pulling from dgllib/dgl-ci-gpu
Digest: sha256:ca40fc52876a2563a4e904d0c271d658c1acc8e6a4f8611b578bb49f8c7fd925
Status: Image is up to date for dgllib/dgl-ci-gpu:cu101_v220816
docker.io/dgllib/dgl-ci-gpu:cu101_v220816
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
v220816: Pulling from dgllib/dgl-ci-cpu
Digest: sha256:64b385c33b44dc57cb96ff264a84d8dfb8ced0caa9b30fbc4cec6d5ee511b099
Status: Image is up to date for dgllib/dgl-ci-cpu:v220816
docker.io/dgllib/dgl-ci-cpu:v220816
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] sh
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] withEnv
[Pipeline] {
11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10: Pulling from rapidsai/cugraph_nightly_torch-cuda
Digest: sha256:72e8cb2632449beac4895f24b55018dea21f79da47fe39be05ef83a4fd3ddb67
Status: Image is up to date for rapidsai/cugraph_nightly_torch-cuda:11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10
docker.io/rapidsai/cugraph_nightly_torch-cuda:11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10
[Pipeline] sh
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
[Pipeline] sh
Fetching changes from the remote Git repository
Cleaning workspace
+ docker inspect -f . dgllib/dgl-ci-gpu:cu101_v220816
.
[Pipeline] }
Fetching without tags
[Pipeline] // withEnv
[Pipeline] withDockerContainer
+ docker inspect -f . dgllib/dgl-ci-cpu:v220816
.
dgl-manual-large-cpu does not seem to be running inside a container
+ docker inspect -f . rapidsai/cugraph_nightly_torch-cuda:11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10
.
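Each of the four build branches primes its image with the same pull-then-probe pair: docker pull refreshes the tag, and docker inspect -f . is a cheap existence check that prints "." and exits non-zero only if the image is missing locally. A sketch with one tag from the log:

    # Sketch of the image pull-and-verify idiom used by every build branch.
    docker pull dgllib/dgl-ci-cpu:v220816
    docker inspect -f . dgllib/dgl-ci-cpu:v220816 && echo "image present locally"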
$ docker run -t -d -u 0:0 -u root -w /root/jenkins/workspace/dgl_PR-4648@2 -v /root/jenkins/workspace/dgl_PR-4648@2:/root/jenkins/workspace/dgl_PR-4648@2:rw,z -v /root/jenkins/workspace/dgl_PR-4648@2@tmp:/root/jenkins/workspace/dgl_PR-4648@2@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-gpu:cu101_v220816 cat
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
> git rev-parse --resolve-git-dir C:\Jenkins\workspace\dgl_PR-4648\.git # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.20.0.windows.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse "HEAD^{commit}" # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git branch -D PR-4648 # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
$ docker top 681e11859cd167f00c1889ac9c7c8c96d552be64f67c250e1cf0b5bc1d83bafe -eo pid,comm
[Pipeline] {
[Pipeline] }
[Pipeline] bat
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] // withEnv
[Pipeline] withDockerContainer
dgl-manual-large-cpu does not seem to be running inside a container
$ docker run -t -d -u 0:0 -u root -w /root/jenkins/workspace/dgl_PR-4648 -v /root/jenkins/workspace/dgl_PR-4648:/root/jenkins/workspace/dgl_PR-4648:rw,z -v /root/jenkins/workspace/dgl_PR-4648@tmp:/root/jenkins/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-cpu:v220816 cat
administrator@WIN-O7QS55HVSDB C:\Jenkins\workspace\dgl_PR-4648>git submodule update --recursive --init
Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS'
Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack'
Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core'
Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest'
Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm'
Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann'
Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl'
Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap'
Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe'
Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust'
Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm'
Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak'
Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/METIS'...
$ docker top 856b09e8bd127c677672426c8af0a6bfefd91dd73a4c28827d4d41ef0f5d0fcc -eo pid,comm
[Pipeline] {
[Pipeline] withDockerContainer
dgl-manual-large-cpu does not seem to be running inside a container
$ docker run -t -d -u 0:0 -u root -w /root/jenkins/workspace/dgl_PR-4648@3 -v /root/jenkins/workspace/dgl_PR-4648@3:/root/jenkins/workspace/dgl_PR-4648@3:rw,z -v /root/jenkins/workspace/dgl_PR-4648@3@tmp:/root/jenkins/workspace/dgl_PR-4648@3@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** rapidsai/cugraph_nightly_torch-cuda:11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10 cat
$ docker top 4e33dd6e703ed0dd95f590a393973aab145ae4e5b4395a56a04d85836079138d -eo pid,comm
[Pipeline] {
[Pipeline] sh
[Pipeline] sh
[Pipeline] sh
+ rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials
+ rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials
+ rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@1b0f444b; decorates RemoteLauncher[hudson.remoting.Channel@1d9b9638:dgl-manual-large-cpu] will be ignored (a typical symptom is the Git executable not being run inside a designated container)
Fetching changes from the remote Git repository
Cleaning workspace
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@106288df; decorates RemoteLauncher[hudson.remoting.Channel@1d9b9638:dgl-manual-large-cpu] will be ignored (a typical symptom is the Git executable not being run inside a designated container)
Fetching changes from the remote Git repository
Cleaning workspace
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@314ff248; decorates RemoteLauncher[hudson.remoting.Channel@1d9b9638:dgl-manual-large-cpu] will be ignored (a typical symptom is the Git executable not being run inside a designated container)
Fetching changes from the remote Git repository
Cleaning workspace
Fetching without tags
Fetching without tags
Fetching without tags
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
> git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648@2/.git # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.17.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git branch -D PR-4648 # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
> git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648/.git # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.17.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git branch -D PR-4648 # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648@3/.git # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.17.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git branch -D PR-4648 # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
[Pipeline] sh
[Pipeline] sh
[Pipeline] sh
+ git submodule update --recursive --init
+ git submodule update --recursive --init
Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS'
Submodule
'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/METIS'... > git clean -fdx # timeout=10 > git clean -fdx # timeout=10 Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS'... 
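The submodule step recorded here is a plain recursive init, run once per workspace; it can be replayed locally with the same command the job uses, plus an optional status check to confirm each third_party path landed on its pinned commit (a minimal sketch, not part of the CI script itself):

    # Initialize all submodules (including nested ones) at the commits pinned by the superproject.
    git submodule update --recursive --init
    # Optional sanity check: print each submodule path with its checked-out SHA.
    git submodule status --recursive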
+ git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/METIS'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/dmlc-core'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/dmlc-core'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/googletest'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/googletest'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/googletest'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/libxsmm'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/libxsmm'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/libxsmm'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/dlpack'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/nccl'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/nanoflann'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/googletest'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/phmap'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/xbyak'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/libxsmm'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/thrust'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'... 
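The interleaved checkouts above (the base workspace plus the @2 and @3 suffixed ones) all follow the same recipe, which can be replayed by hand to reproduce the exact tree under test. Every ref and SHA below is taken verbatim from this log; only the local clone location is arbitrary:

    git clone https://github.com/dmlc/dgl.git && cd dgl
    # Fetch the PR head and master under the same refspecs the Jenkins git plugin uses.
    git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git \
        +refs/pull/4648/head:refs/remotes/origin/PR-4648 \
        +refs/heads/master:refs/remotes/origin/master
    # Check out the PR head, then merge in the master commit being tested against.
    git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea
    git merge d78a3a4baf611b90871a849f58647160c7cd9ab4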
Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/nccl'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/phmap'... Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/nccl'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/phmap'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe'... Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/thrust'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/thrust'... Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tvm'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nccl'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/phmap'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'... 
Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/xbyak'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/xbyak'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/METIS/GKlib'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/METIS/GKlib'... Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' 
(https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/googletest'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe/third_party/googletest'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/xbyak'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/libnop'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe/third_party/libnop'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/libuv'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe/third_party/libuv'... Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/pybind11'... Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'... 
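Note that tensorpipe, pybind11, and METIS each carry nested submodules of their own (libuv, libnop, tools/clang, GKlib), which is why every update in this log uses the recursive form. For a fresh working copy, a recursive clone is the one-step equivalent of the clone-then-update sequence shown here (a sketch; the --jobs value is optional tuning, not something the job sets):

    # One-step equivalent: clone DGL and initialize all nested submodules, fetching four in parallel.
    git clone --recurse-submodules --jobs 4 https://github.com/dmlc/dgl.git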
Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe/third_party/pybind11'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'... Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe/third_party/pybind11/tools/clang'... Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/pybind11/tools/clang'... Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/thrust/dependencies/cub'... Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/thrust/dependencies/cub'... Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'... 
Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642'
Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7'
Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762'
Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95'
Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c'
Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020'
[Pipeline] bat
Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95'
Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838'
Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack'
Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core'
Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang'
Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw'
Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tvm/3rdparty/dlpack'...
Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838'
Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack'
Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core'
Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang'
Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw'
Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/dlpack'...
administrator@WIN-O7QS55HVSDB C:\Jenkins\workspace\dgl_PR-4648>CALL tests\scripts\build_dgl.bat
**********************************************************************
** Visual Studio 2019 Developer Command Prompt v16.5.5
** Copyright (c) 2019 Microsoft Corporation
**********************************************************************
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'...
[vcvarsall.bat] Environment initialized for: 'x64'
Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tvm/3rdparty/dmlc-core'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/dmlc-core'...
created virtual environment CPython3.6.7.final.0-64 in 345ms
  creator CPython3Windows(dest=C:\Users\Administrator\Envs\jenkins-dgl-PR-4648-2, clear=False, global=True)
  seeder FromAppData(download=False, pip=bundle, setuptools=bundle, wheel=bundle, via=copy, app_data_dir=C:\Users\Administrator\AppData\Local\pypa\virtualenv)
    added seed packages: pip==21.3.1, setuptools==59.6.0, wheel==0.37.1
  activators BashActivator,BatchActivator,FishActivator,PowerShellActivator,PythonActivator,XonshActivator
Deleted file - C:\Jenkins\workspace\dgl_PR-4648\third_party\libxsmm\BUILD
Deleted file - C:\Jenkins\workspace\dgl_PR-4648\third_party\libxsmm\samples\hello\BUILD
Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tvm/3rdparty/rang'...
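The "created virtual environment" block above is standard virtualenv output on the Windows worker. An equivalent throwaway environment can be made by hand (a minimal sketch, assuming the virtualenv package is installed; the directory name simply mirrors the one in the log):

    # Create a virtualenv seeded with pip/setuptools/wheel, as the log shows, then activate it.
    python -m virtualenv jenkins-dgl-PR-4648-2
    . jenkins-dgl-PR-4648-2/bin/activate    # on Windows: jenkins-dgl-PR-4648-2\Scripts\activate.bat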
Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/rang'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tvm/3rdparty/vta-hw'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/vta-hw'... Could Not Find C:\Jenkins\workspace\dgl_PR-4648\_download -- Selecting Windows SDK version 10.0.18362.0 to target Windows 6.3.9600. Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'... Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] sh [Pipeline] sh + bash tests/scripts/build_dgl.sh cugraph + bash tests/scripts/build_dgl.sh gpu ~/jenkins/workspace/dgl_PR-4648@2/build ~/jenkins/workspace/dgl_PR-4648@2 -- The C compiler identification is GNU 7.5.0 -- The CXX compiler identification is GNU 7.5.0 -- Check for working C compiler: /usr/bin/cc -- The C compiler identification is MSVC 19.25.28614.0 ~/jenkins/workspace/dgl_PR-4648@3/build ~/jenkins/workspace/dgl_PR-4648@3 -- The C compiler identification is GNU 7.5.0 Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' -- Check for working C compiler: /usr/bin/cc -- works -- Detecting C compiler ABI info -- Detecting C compiler ABI info - done -- The CXX compiler identification is MSVC 19.25.28614.0 -- The CXX compiler identification is GNU 7.5.0 -- Detecting C compiler ABI info -- Detecting C compiler ABI info - done -- Check for working C compiler: /usr/bin/cc - skipped -- Detecting C compile features -- Detecting C compile features - done -- Detecting CXX compiler ABI info Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' -- Detecting C compile features -- Detecting C compile features - done -- Check for working CXX compiler: /usr/bin/c++ -- Check for working CXX compiler: /usr/bin/c++ -- works -- Detecting CXX compiler ABI info -- Detecting C compiler ABI info -- Detecting CXX compiler ABI info - done -- Check for working CXX compiler: /usr/bin/c++ - skipped -- Detecting CXX compile features -- Detecting CXX compile features - done -- Start configuring project dgl -- Build with CUDA support -- Performing Test CMAKE_HAVE_LIBC_PTHREAD Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for 
path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'... -- Detecting CXX compiler ABI info - done -- Detecting CXX compile features -- Detecting CXX compile features - done -- Start configuring project dgl -- Build with CUDA support -- Looking for pthread.h -- Looking for pthread.h - found -- Performing Test CMAKE_HAVE_LIBC_PTHREAD -- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed -- Looking for pthread_create in pthreads -- Looking for pthread_create in pthreads - not found -- Looking for pthread_create in pthread -- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed -- Looking for pthread_create in pthreads -- Looking for pthread_create in pthreads - not found -- Looking for pthread_create in pthread -- Detecting C compiler ABI info - done -- Check for working C compiler: C:/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Tools/MSVC/14.25.28610/bin/Hostx64/x64/cl.exe - skipped -- Detecting C compile features -- Detecting C compile features - done -- Detecting CXX compiler ABI info -- Looking for pthread_create in pthread - found -- Found Threads: TRUE -- Found CUDA: /usr/local/cuda (found version "11.5") -- Found CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda -- Found CUDA_CUDART_LIBRARY=/usr/local/cuda/lib64/libcudart.so -- Found CUDA_CUBLAS_LIBRARY=/opt/conda/lib/libcublas.so -- Found CUDA_CURAND_LIBRARY=/opt/conda/lib/libcurand.so -- Performing Test SUPPORT_CXX14 -- Looking for pthread_create in pthread - found -- Found Threads: TRUE -- Found CUDA: /usr/local/cuda (found version "10.1") -- Found CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda -- Found CUDA_CUDART_LIBRARY=/usr/local/cuda/lib64/libcudart.so -- Found CUDA_CUBLAS_LIBRARY=/usr/lib/x86_64-linux-gnu/libcublas.so -- Found CUDA_CURAND_LIBRARY=/usr/local/cuda/lib64/libcurand.so -- Performing Test SUPPORT_CXX14 -- Performing Test SUPPORT_CXX14 - Success -- Use external CUB/Thrust library for a consistent API and performance. Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'... -- Performing Test SUPPORT_CXX14 - Success -- Use external CUB/Thrust library for a consistent API and performance. -- Detecting CXX compiler ABI info - done -- Check for working CXX compiler: C:/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Tools/MSVC/14.25.28610/bin/Hostx64/x64/cl.exe - skipped -- Detecting CXX compile features -- Detecting CXX compile features - done -- Start configuring project dgl -- Found OpenMP_C: -fopenmp (found version "4.5") -- Found OpenMP_CXX: -fopenmp (found version "4.5") -- Found OpenMP: TRUE (found version "4.5") -- Build with OpenMP. -- Build with LIBXSMM optimization. -- Looking for sys/epoll.h -- Found OpenMP_C: -fopenmp (found version "4.5") -- Found OpenMP_CXX: -fopenmp (found version "4.5") -- Found OpenMP: TRUE (found version "4.5") -- Build with OpenMP. -- Build with LIBXSMM optimization. 
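build_dgl.sh drives a conventional out-of-tree CMake configure; the GPU-enabled variants invoked above ("cugraph" and "gpu") can be approximated as below. This is a sketch assuming DGL's usual -DUSE_CUDA=ON cache option; tests/scripts/build_dgl.sh remains the authoritative script:

    mkdir -p build && cd build
    # Assumed CUDA toggle for the gpu/cugraph builds; consult tests/scripts/build_dgl.sh for the exact flags.
    cmake -DUSE_CUDA=ON ..
    make -j"$(nproc)"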
-- Looking for sys/epoll.h
-- Looking for sys/epoll.h - found
-- -fopenmp -O2 -Wall -fPIC -std=c++14 -DUSE_AVX -DUSE_LIBXSMM -DDGL_CPU_LLC_SIZE=40000000 -DUSE_EPOLL -DIDXTYPEWIDTH=64 -DREALTYPEWIDTH=32
-- Running GPU architecture autodetection
-- Looking for sys/epoll.h - found
-- -fopenmp -O2 -Wall -fPIC -std=c++14 -DUSE_AVX -DUSE_LIBXSMM -DDGL_CPU_LLC_SIZE=40000000 -DUSE_EPOLL -DIDXTYPEWIDTH=64 -DREALTYPEWIDTH=32
-- Running GPU architecture autodetection
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95'
CMake Warning at cmake/modules/CUDA.cmake:71 (message):
  Running GPU detection script with nvcc failed:
Call Stack (most recent call first):
  cmake/modules/CUDA.cmake:132 (dgl_detect_installed_gpus)
  cmake/modules/CUDA.cmake:266 (dgl_select_nvcc_arch_flags)
  CMakeLists.txt:170 (dgl_config_cuda)
CMake Warning at cmake/modules/CUDA.cmake:76 (message):
  Automatic GPU detection failed. Building for all known architectures (35 50 60 70).
Call Stack (most recent call first):
  cmake/modules/CUDA.cmake:132 (dgl_detect_installed_gpus)
  cmake/modules/CUDA.cmake:266 (dgl_select_nvcc_arch_flags)
  CMakeLists.txt:170 (dgl_config_cuda)
-- CUDA flags: -Xcompiler ,-fopenmp,-O2,-Wall,-fPIC,-std=c++14,,-DUSE_AVX,-DUSE_LIBXSMM,-DDGL_CPU_LLC_SIZE=40000000,-DUSE_EPOLL,-DIDXTYPEWIDTH=64,-DREALTYPEWIDTH=32;--expt-relaxed-constexpr;-gencode;arch=compute_35,code=sm_35;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;--expt-extended-lambda;-Wno-deprecated-declarations;-std=c++14
-- Found OpenMP_C: -fopenmp (found version "4.5")
-- Found OpenMP_CXX: -fopenmp (found version "4.5")
-- Looking for clock_gettime in rt
-- Looking for clock_gettime in rt - found
-- Looking for fopen64
-- Found OpenMP_C: -openmp (found version "2.0")
CMake Warning at cmake/modules/CUDA.cmake:71 (message):
  Running GPU detection script with nvcc failed:
Call Stack (most recent call first):
  cmake/modules/CUDA.cmake:132 (dgl_detect_installed_gpus)
  cmake/modules/CUDA.cmake:266 (dgl_select_nvcc_arch_flags)
  CMakeLists.txt:170 (dgl_config_cuda)
CMake Warning at cmake/modules/CUDA.cmake:76 (message):
  Automatic GPU detection failed. Building for all known architectures (35 50 60 70 80).
Call Stack (most recent call first): cmake/modules/CUDA.cmake:132 (dgl_detect_installed_gpus) cmake/modules/CUDA.cmake:266 (dgl_select_nvcc_arch_flags) CMakeLists.txt:170 (dgl_config_cuda) -- CUDA flags: -Xcompiler ,-fopenmp,-O2,-Wall,-fPIC,-std=c++14,,-DUSE_AVX,-DUSE_LIBXSMM,-DDGL_CPU_LLC_SIZE=40000000,-DUSE_EPOLL,-DIDXTYPEWIDTH=64,-DREALTYPEWIDTH=32;--expt-relaxed-constexpr;-gencode;arch=compute_35,code=sm_35;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_80,code=sm_80;--expt-extended-lambda;-Wno-deprecated-declarations;-std=c++14 -- Found OpenMP_C: -fopenmp (found version "4.5") -- Found OpenMP_CXX: -fopenmp (found version "4.5") -- Looking for clock_gettime in rt -- Looking for clock_gettime in rt - found -- Looking for fopen64 -- Looking for fopen64 - not found -- Looking for C++ include cxxabi.h -- Looking for C++ include cxxabi.h - found -- Looking for nanosleep Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'... -- Looking for fopen64 - not found -- Looking for C++ include cxxabi.h -- Looking for C++ include cxxabi.h - found -- Looking for nanosleep -- Looking for nanosleep - found -- Looking for backtrace -- Looking for backtrace - found -- backtrace facility detected in default set of libraries -- Found Backtrace: /usr/include -- Check if the system is big endian -- Searching 16 bit integer -- Looking for sys/types.h -- Found OpenMP_CXX: -openmp (found version "2.0") -- Found OpenMP: TRUE (found version "2.0") -- Build with OpenMP. -- Build with LIBXSMM optimization. -- Looking for clock_gettime in rt -- Looking for nanosleep - found -- Looking for backtrace -- Looking for sys/types.h - found -- Looking for stdint.h -- Looking for stdint.h - found -- Looking for stddef.h Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'... -- Looking for clock_gettime in rt - not found -- Looking for fopen64 -- Looking for backtrace - found -- backtrace facility detected in default set of libraries -- Found Backtrace: /usr/include -- /root/jenkins/workspace/dgl_PR-4648@3/third_party/dmlc-core/cmake/build_config.h.in -> include/dmlc/build_config.h -- Performing Test SUPPORT_MSSE2 -- Performing Test SUPPORT_MSSE2 - Success -- Looking for execinfo.h -- Looking for stddef.h - found -- Check size of unsigned short -- Check size of unsigned short - done -- Using unsigned short -- Looking for execinfo.h - found -- Looking for getline -- Looking for getline - found -- Check if the system is big endian - little endian -- /root/jenkins/workspace/dgl_PR-4648@2/third_party/dmlc-core/cmake/build_config.h.in -> include/dmlc/build_config.h -- Performing Test SUPPORT_MSSE2 -- Performing Test SUPPORT_MSSE2 - Success -- Looking for execinfo.h Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'... 
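The repeated pair of warnings above is expected when a CUDA build is configured on a host with no visible GPU: the nvcc detection snippet cannot enumerate devices, so CMake falls back to compiling for a fixed list of architectures. Whether a GPU is actually visible can be checked before configuring (a sketch; compute_cap queries need a reasonably recent NVIDIA driver, and --list-gpu-arch a CUDA 11-era nvcc):

    # Is any GPU visible inside this container?
    nvidia-smi --query-gpu=name,compute_cap --format=csv \
        || echo "no GPU visible; expect the all-known-architectures fallback"
    # Which architectures can this nvcc generate code for?
    nvcc --list-gpu-arch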
-- Performing Test UV_LINT_W4 -- Performing Test UV_LINT_W4 - Failed -- Performing Test UV_LINT_NO_UNUSED_PARAMETER_MSVC -- Performing Test UV_LINT_NO_UNUSED_PARAMETER_MSVC - Failed -- Performing Test UV_LINT_NO_CONDITIONAL_CONSTANT_MSVC -- Looking for fopen64 - not found -- Looking for C++ include cxxabi.h -- Looking for execinfo.h - found -- Looking for getline -- Looking for getline - found -- Performing Test UV_LINT_NO_CONDITIONAL_CONSTANT_MSVC - Failed -- Performing Test UV_LINT_NO_NONSTANDARD_MSVC -- Performing Test UV_LINT_NO_NONSTANDARD_MSVC - Failed -- Performing Test UV_LINT_NO_NONSTANDARD_EMPTY_TU_MSVC -- Performing Test UV_LINT_NO_NONSTANDARD_EMPTY_TU_MSVC - Failed -- Performing Test UV_LINT_NO_NONSTANDARD_FILE_SCOPE_MSVC -- Performing Test UV_LINT_NO_NONSTANDARD_FILE_SCOPE_MSVC - Failed -- Performing Test UV_LINT_NO_NONSTANDARD_NONSTATIC_DLIMPORT_MSVC -- Performing Test UV_LINT_NO_NONSTANDARD_NONSTATIC_DLIMPORT_MSVC - Failed -- Performing Test UV_LINT_NO_HIDES_LOCAL -- Looking for C++ include cxxabi.h - not found -- Looking for nanosleep -- Performing Test UV_LINT_W4 -- Performing Test UV_LINT_W4 - Failed -- Performing Test UV_LINT_NO_UNUSED_PARAMETER_MSVC -- Performing Test UV_LINT_NO_UNUSED_PARAMETER_MSVC - Failed -- Performing Test UV_LINT_NO_CONDITIONAL_CONSTANT_MSVC -- Performing Test UV_LINT_NO_CONDITIONAL_CONSTANT_MSVC - Failed -- Performing Test UV_LINT_NO_NONSTANDARD_MSVC -- Performing Test UV_LINT_NO_NONSTANDARD_MSVC - Failed -- Performing Test UV_LINT_NO_NONSTANDARD_EMPTY_TU_MSVC -- Performing Test UV_LINT_NO_NONSTANDARD_EMPTY_TU_MSVC - Failed -- Performing Test UV_LINT_NO_NONSTANDARD_FILE_SCOPE_MSVC -- Performing Test UV_LINT_NO_NONSTANDARD_FILE_SCOPE_MSVC - Failed -- Performing Test UV_LINT_NO_NONSTANDARD_NONSTATIC_DLIMPORT_MSVC -- Performing Test UV_LINT_NO_HIDES_LOCAL - Failed -- Performing Test UV_LINT_NO_HIDES_PARAM -- Performing Test UV_LINT_NO_HIDES_PARAM - Failed -- Performing Test UV_LINT_NO_HIDES_GLOBAL -- Performing Test UV_LINT_NO_HIDES_GLOBAL - Failed -- Performing Test UV_LINT_NO_CONDITIONAL_ASSIGNMENT_MSVC -- Performing Test UV_LINT_NO_CONDITIONAL_ASSIGNMENT_MSVC - Failed -- Performing Test UV_LINT_NO_UNSAFE_MSVC -- Performing Test UV_LINT_NO_UNSAFE_MSVC - Failed -- Performing Test UV_LINT_WALL -- Looking for nanosleep - not found -- Looking for backtrace Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'... 
-- Performing Test UV_LINT_NO_NONSTANDARD_NONSTATIC_DLIMPORT_MSVC - Failed -- Performing Test UV_LINT_NO_HIDES_LOCAL -- Performing Test UV_LINT_NO_HIDES_LOCAL - Failed -- Performing Test UV_LINT_NO_HIDES_PARAM -- Performing Test UV_LINT_NO_HIDES_PARAM - Failed -- Performing Test UV_LINT_NO_HIDES_GLOBAL -- Performing Test UV_LINT_NO_HIDES_GLOBAL - Failed -- Performing Test UV_LINT_NO_CONDITIONAL_ASSIGNMENT_MSVC -- Performing Test UV_LINT_NO_CONDITIONAL_ASSIGNMENT_MSVC - Failed -- Performing Test UV_LINT_NO_UNSAFE_MSVC -- Performing Test UV_LINT_NO_UNSAFE_MSVC - Failed -- Performing Test UV_LINT_WALL -- Performing Test UV_LINT_WALL - Success -- Performing Test UV_LINT_NO_UNUSED_PARAMETER -- Performing Test UV_LINT_WALL - Success -- Performing Test UV_LINT_NO_UNUSED_PARAMETER -- Performing Test UV_LINT_NO_UNUSED_PARAMETER - Success -- Performing Test UV_LINT_STRICT_PROTOTYPES -- Performing Test UV_LINT_NO_UNUSED_PARAMETER - Success -- Performing Test UV_LINT_STRICT_PROTOTYPES -- Performing Test UV_LINT_STRICT_PROTOTYPES - Success -- Performing Test UV_LINT_EXTRA -- Looking for backtrace - not found -- Could NOT find Backtrace (missing: Backtrace_LIBRARY Backtrace_INCLUDE_DIR) -- Check if the system is big endian -- Searching 16 bit integer -- Looking for sys/types.h Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' -- Performing Test UV_LINT_STRICT_PROTOTYPES - Success -- Performing Test UV_LINT_EXTRA -- Performing Test UV_LINT_EXTRA - Success -- Performing Test UV_LINT_UTF8_MSVC -- Performing Test UV_LINT_UTF8_MSVC - Failed -- Performing Test UV_F_STRICT_ALIASING -- Performing Test UV_LINT_EXTRA - Success -- Performing Test UV_LINT_UTF8_MSVC -- Performing Test UV_LINT_UTF8_MSVC - Failed -- Performing Test UV_F_STRICT_ALIASING Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' -- Performing Test UV_F_STRICT_ALIASING - Success -- summary of build options: Install prefix: /usr/local Target system: Linux Compiler: C compiler: /usr/bin/cc CFLAGS: -fopenmp -O2 -fPIC -DUSE_AVX -DUSE_LIBXSMM -DDGL_CPU_LLC_SIZE=40000000 -DUSE_EPOLL -DIDXTYPEWIDTH=64 -DREALTYPEWIDTH=32 -DLINUX -D_FILE_OFFSET_BITS=64 -std=c99 -fno-strict-aliasing -march=native -fPIC -Werror -Wno-unused-function -Wno-unused-but-set-variable -Wno-unused-variable -Wno-unknown-pragmas -DNDEBUG -DNDEBUG2 -DHAVE_EXECINFO_H -DHAVE_GETLINE -O3 -- Found uv: 1.38.1 (found version "1.38.1") -- Build with unittest -- Found PythonInterp: /usr/bin/python3.6 (found version "3.6.9") -- Building dist/rpc tests -- Configuring done -- Performing Test UV_F_STRICT_ALIASING - Success -- summary of build options: Install prefix: /usr/local Target system: Linux Compiler: C compiler: /usr/bin/cc CFLAGS: -fopenmp -O2 -fPIC -DUSE_AVX -DUSE_LIBXSMM -DDGL_CPU_LLC_SIZE=40000000 -DUSE_EPOLL -DIDXTYPEWIDTH=64 -DREALTYPEWIDTH=32 -DLINUX -D_FILE_OFFSET_BITS=64 -std=c99 -fno-strict-aliasing -march=native -fPIC -Werror -Wno-unused-function -Wno-unused-but-set-variable -Wno-unused-variable -Wno-unknown-pragmas -DNDEBUG -DNDEBUG2 -DHAVE_EXECINFO_H -DHAVE_GETLINE -O3 -- Found uv: 1.38.1 (found version "1.38.1") -- Build with unittest CMake Deprecation Warning at third_party/googletest/CMakeLists.txt:4 (cmake_minimum_required): Compatibility with CMake < 2.8.12 will be removed from a future version of CMake. Update the VERSION argument value or use a ... 
suffix to tell CMake that the project does not need compatibility with older versions.
CMake Deprecation Warning at third_party/googletest/googlemock/CMakeLists.txt:45 (cmake_minimum_required):
  Compatibility with CMake < 2.8.12 will be removed from a future version of CMake.
  Update the VERSION argument value or use a ... suffix to tell CMake that the project does not need compatibility with older versions.
CMake Deprecation Warning at third_party/googletest/googletest/CMakeLists.txt:56 (cmake_minimum_required):
  Compatibility with CMake < 2.8.12 will be removed from a future version of CMake.
  Update the VERSION argument value or use a ... suffix to tell CMake that the project does not need compatibility with older versions.
-- Found PythonInterp: /opt/conda/bin/python (found version "3.9.13")
-- Building dist/rpc tests
-- Configuring done
CMake Warning at /opt/conda/share/cmake-3.24/Modules/FindCUDA.cmake:1985 (add_library):
  Cannot generate a safe runtime search path for target dgl because files in some directories may conflict with libraries in implicit directories:
    runtime library [libgomp.so.1] in /usr/lib/gcc/x86_64-linux-gnu/7 may be hidden by files in: /opt/conda/lib
  Some of these libraries may not be found correctly.
Call Stack (most recent call first):
  CMakeLists.txt:187 (cuda_add_library)
CMake Warning at /opt/conda/share/cmake-3.24/Modules/FindCUDA.cmake:1985 (add_library):
  Cannot generate a safe runtime search path for target dgl because there is a cycle in the constraint graph:
    dir 0 is [/usr/local/cuda/lib64]
      dir 1 must precede it due to runtime library [libcurand.so.10]
    dir 1 is [/opt/conda/lib]
      dir 0 must precede it due to runtime library [libcudart.so.11.0]
  Some of these libraries may not be found correctly.
Call Stack (most recent call first):
  CMakeLists.txt:187 (cuda_add_library)
CMake Warning at CMakeLists.txt:334 (add_executable):
  Cannot generate a safe runtime search path for target runUnitTests because files in some directories may conflict with libraries in implicit directories:
    runtime library [libgomp.so.1] in /usr/lib/gcc/x86_64-linux-gnu/7 may be hidden by files in: /opt/conda/lib
  Some of these libraries may not be found correctly.
CMake Warning at CMakeLists.txt:334 (add_executable):
  Cannot generate a safe runtime search path for target runUnitTests because there is a cycle in the constraint graph:
    dir 0 is [/root/jenkins/workspace/dgl_PR-4648@3/build]
    dir 1 is [/usr/local/cuda/lib64]
      dir 2 must precede it due to runtime library [libcurand.so.10]
    dir 2 is [/opt/conda/lib]
      dir 1 must precede it due to runtime library [libcudart.so.11.0]
  Some of these libraries may not be found correctly.
CMake Warning at CMakeLists.txt:342 (add_executable):
  Cannot generate a safe runtime search path for target rpc_client because files in some directories may conflict with libraries in implicit directories:
    runtime library [libgomp.so.1] in /usr/lib/gcc/x86_64-linux-gnu/7 may be hidden by files in: /opt/conda/lib
  Some of these libraries may not be found correctly.
CMake Warning at CMakeLists.txt:342 (add_executable):
  Cannot generate a safe runtime search path for target rpc_client because there is a cycle in the constraint graph:
    dir 0 is [/root/jenkins/workspace/dgl_PR-4648@3/build]
    dir 1 is [/usr/local/cuda/lib64]
      dir 2 must precede it due to runtime library [libcurand.so.10]
    dir 2 is [/opt/conda/lib]
      dir 1 must precede it due to runtime library [libcudart.so.11.0]
  Some of these libraries may not be found correctly.
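The runtime-search-path warnings above mean that libcudart, libcurand, and libgomp exist both under /opt/conda/lib and in the CUDA toolkit or GCC directories, so the loader may resolve the wrong copy. After the build, the actual resolution can be inspected and, if needed, forced (a sketch; the libdgl.so name is inferred from the "target dgl" named in the warnings):

    # Check which copies of the conflicting libraries the dynamic loader resolves.
    ldd build/libdgl.so | grep -E 'libcudart|libcurand|libgomp'
    # If the wrong copy wins, put the CUDA toolkit directory first on the search path.
    export LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH}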
CMake Warning at CMakeLists.txt:345 (add_executable): Cannot generate a safe runtime search path for target rpc_server because files in some directories may conflict with libraries in implicit directories: runtime library [libgomp.so.1] in /usr/lib/gcc/x86_64-linux-gnu/7 may be hidden by files in: /opt/conda/lib Some of these libraries may not be found correctly. CMake Warning at CMakeLists.txt:345 (add_executable): Cannot generate a safe runtime search path for target rpc_server because there is a cycle in the constraint graph: dir 0 is [/root/jenkins/workspace/dgl_PR-4648@3/build] dir 1 is [/usr/local/cuda/lib64] dir 2 must precede it due to runtime library [libcurand.so.10] dir 2 is [/opt/conda/lib] dir 1 must precede it due to runtime library [libcudart.so.11.0] Some of these libraries may not be found correctly. Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' -- Generating done -- Build files have been written to: /root/jenkins/workspace/dgl_PR-4648@2/build Scanning dependencies of target nccl_external Scanning dependencies of target tensoradapter_pytorch Scanning dependencies of target libxsmm [ 0%] Creating directories for 'nccl_external' Scanning dependencies of target dmlc Scanning dependencies of target gtest [ 0%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/config.cc.o [ 0%] Building CXX object third_party/googletest/googletest/CMakeFiles/gtest.dir/src/gtest-all.cc.o [ 0%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/data.cc.o [ 0%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io.cc.o [ 1%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/indexed_recordio_split.cc.o [ 1%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/recordio.cc.o [ 2%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/input_split_base.cc.o [ 2%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/line_split.cc.o [ 2%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/recordio_split.cc.o [ 2%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/filesys.cc.o [ 2%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/local_filesys.cc.o Scanning dependencies of target tensorpipe_uv Scanning dependencies of target metis [ 2%] No download step for 'nccl_external' [ 3%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/fs-poll.c.o [ 3%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/inet.c.o [ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/strscpy.c.o [ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/random.c.o [ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/idna.c.o [ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/threadpool.c.o [ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/uv-common.c.o [ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/timer.c.o [ 4%] Building C object 
third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/async.c.o [ 4%] No patch step for 'nccl_external' [ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/core.c.o [ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/uv-data-getter-setters.c.o [ 6%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/version.c.o [ 6%] No update step for 'nccl_external' [ 6%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/b64.c.o [ 6%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/error.c.o [ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/fs.c.o [ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/getnameinfo.c.o [ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/getaddrinfo.c.o [ 8%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/loop.c.o [ 9%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/blas.c.o [ 10%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/fkvkselect.c.o [ 10%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/loop-watcher.c.o [ 10%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/csr.c.o [ 10%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/dl.c.o [ 12%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/graph.c.o [ 12%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/getopt.c.o [ 12%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/process.c.o [ 12%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/pipe.c.o [ 12%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/pqueue.c.o [ 12%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/udp.c.o [ 12%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/io.c.o [ 12%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/fs.c.o [ 13%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/checkgraph.c.o [ 13%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/tcp.c.o [ 13%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/evaluate.c.o [ 13%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/proctitle.c.o [ 14%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/tty.c.o [ 15%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/linux-inotify.c.o [ 15%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/htable.c.o [ 15%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/contig.c.o [ 15%] Building C object 
third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/seq.c.o [ 15%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/kwayfm.c.o [ 16%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/random-sysctl-linux.c.o [ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/gkregex.c.o [ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/compress.c.o [ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/gk_util.c.o [ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/sort.c.o [ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/memory.c.o [ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/random.c.o [ 16%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/stream.c.o [ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/string.c.o [ 16%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/poll.c.o [ 16%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/signal.c.o [ 18%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/linux-core.c.o [ 18%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/random-devurandom.c.o [ 18%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/thread.c.o [ 18%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/itemsets.c.o [ 18%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/linux-syscalls.c.o [ 19%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/procfs-exepath.c.o [ 19%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/tokenizer.c.o [ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/mcore.c.o [ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/coarsen.c.o [ 22%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/debug.c.o [ 22%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/options.c.o [ 22%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/kmetis.c.o [ 24%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/rw.c.o [ 24%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mmd.c.o [ 24%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/timers.c.o [ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/gklib.c.o [ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/auxapi.c.o [ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/fm.c.o [ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/frename.c.o [ 25%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/random-getrandom.c.o [ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/balance.c.o [ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/fortran.c.o [ 25%] Building C object 
third_party/METIS/libmetis/CMakeFiles/metis.dir/bucketsort.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/kwayrefine.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/ometis.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/pmetis.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mesh.c.o [Pipeline] sh -- Looking for sys/types.h - found -- Looking for stdint.h -- Generating done -- Build files have been written to: /root/jenkins/workspace/dgl_PR-4648@3/build [ 0%] Creating directories for 'nccl_external' [ 1%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/fs-poll.c.o [ 1%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/idna.c.o [ 1%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/inet.c.o [ 1%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/config.cc.o [ 1%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/threadpool.c.o [ 1%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/random.c.o [ 1%] Building CXX object third_party/googletest/googletest/CMakeFiles/gtest.dir/src/gtest-all.cc.o [ 1%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/b64.c.o [ 2%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/strscpy.c.o [ 3%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/recordio.cc.o [ 3%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/uv-data-getter-setters.c.o [ 3%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/uv-common.c.o [ 4%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/line_split.cc.o [ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/version.c.o [ 4%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/data.cc.o [ 4%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io.cc.o [ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/timer.c.o [ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/async.c.o [ 6%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/blas.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/graph.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/refine.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/parmetis.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/meshpart.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/srefine.c.o [ 28%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mincover.c.o [ 28%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/sfm.c.o [ 28%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/minconn.c.o [ 28%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/initpart.c.o [ 28%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mcutil.c.o [ 28%] 
Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/separator.c.o [ 30%] No configure step for 'nccl_external' [ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/timing.c.o [ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/wspace.c.o [ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/stat.c.o [ 31%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/util.c.o [ 31%] Performing build step for 'nccl_external' -- The C compiler identification is GNU 7.5.0 [ 6%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/recordio_split.cc.o [ 6%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/indexed_recordio_split.cc.o [ 6%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/error.c.o [ 6%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/core.c.o [ 6%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/evaluate.c.o [ 6%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/filesys.cc.o [ 7%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/input_split_base.cc.o [ 7%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/fs.c.o [ 7%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/getopt.c.o [ 8%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/fkvkselect.c.o [ 8%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/gk_util.c.o [ 8%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/local_filesys.cc.o [ 8%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/csr.c.o [ 9%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/fs.c.o [ 9%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/dl.c.o [ 9%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/getnameinfo.c.o [ 9%] No download step for 'nccl_external' [ 9%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/getaddrinfo.c.o [ 9%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/loop-watcher.c.o [ 10%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/loop.c.o [ 10%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/gkregex.c.o [ 12%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/graph.c.o [ 12%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/htable.c.o [ 12%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/pipe.c.o [ 12%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/io.c.o [ 13%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/random-devurandom.c.o [ 13%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/stream.c.o [ 13%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/poll.c.o [ 13%] Building C object 
third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/process.c.o [ 13%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/thread.c.o [ 14%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/mcore.c.o [ 14%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/tcp.c.o [ 14%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/itemsets.c.o -- The CXX compiler identification is GNU 7.5.0 -- Check for working C compiler: /usr/bin/cc -- Looking for stdint.h - found -- Looking for stddef.h + bash tests/scripts/build_dgl.sh cpu ~/jenkins/workspace/dgl_PR-4648/build ~/jenkins/workspace/dgl_PR-4648 -- The C compiler identification is GNU 7.5.0 [ 15%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/tty.c.o [ 15%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/proctitle.c.o [ 15%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/signal.c.o [ 15%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/linux-core.c.o [ 15%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/udp.c.o [ 15%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/pqueue.c.o [ 15%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/memory.c.o [ 16%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/linux-inotify.c.o [ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/random.c.o [ 18%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/rw.c.o [ 18%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/sort.c.o [ 19%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/tokenizer.c.o [ 19%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/timers.c.o [ 19%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/auxapi.c.o [ 19%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/seq.c.o [ 19%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/linux-syscalls.c.o [ 19%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/string.c.o [ 19%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/procfs-exepath.c.o [ 20%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/random-sysctl-linux.c.o [ 20%] No update step for 'nccl_external' [ 20%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/random-getrandom.c.o [ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/balance.c.o [ 21%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/debug.c.o [ 21%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/fm.c.o [ 21%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/fortran.c.o [ 22%] Building C object 
third_party/METIS/libmetis/CMakeFiles/metis.dir/compress.c.o [ 22%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/checkgraph.c.o [ 22%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/contig.c.o [ 22%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/coarsen.c.o [ 24%] No patch step for 'nccl_external' [ 24%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/frename.c.o [ 24%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/bucketsort.c.o [ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/gklib.c.o [ 25%] No configure step for 'nccl_external' [ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/initpart.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/kwayrefine.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mcutil.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mesh.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/meshpart.c.o [ 26%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/graph.c.o [ 27%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/minconn.c.o [ 27%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/kmetis.c.o [ 27%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/kwayfm.c.o [ 27%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mmd.c.o [ 27%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/ometis.c.o [ 27%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mincover.c.o [ 28%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/options.c.o [ 28%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/pmetis.c.o [ 28%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/refine.c.o [ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/sfm.c.o [ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/separator.c.o [ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/srefine.c.o [ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/parmetis.c.o [ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/stat.c.o [ 31%] Performing build step for 'nccl_external' [ 31%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/timing.c.o [ 32%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/util.c.o [ 32%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/wspace.c.o Grabbing include/nccl_net.h > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/include/nccl_net.h Generating nccl.h.in > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/include/nccl.h Generating nccl.pc.in > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/lib/pkgconfig/nccl.pc -- Check for working C compiler: /usr/bin/cc -- works -- Detecting C compiler ABI info Compiling init.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/init.o Compiling channel.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/channel.o Compiling bootstrap.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/bootstrap.o Compiling transport.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/transport.o Compiling enqueue.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/enqueue.o Compiling group.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/group.o Compiling debug.cc > 
/root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/debug.o -- Detecting C compiler ABI info - done Compiling proxy.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/proxy.o Compiling misc/nvmlwrap.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/misc/nvmlwrap.o -- Detecting C compile features -- Detecting C compile features - done -- Check for working CXX compiler: /usr/bin/c++ Compiling misc/ibvwrap.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/misc/ibvwrap.o Compiling misc/gdrwrap.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/misc/gdrwrap.o Compiling misc/utils.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/misc/utils.o Compiling misc/argcheck.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/misc/argcheck.o Compiling transport/p2p.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/transport/p2p.o Compiling transport/shm.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/transport/shm.o Compiling transport/net.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/transport/net.o Compiling transport/net_socket.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/transport/net_socket.o Compiling transport/net_ib.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/transport/net_ib.o Compiling transport/coll_net.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/transport/coll_net.o Compiling collectives/sendrecv.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/sendrecv.o Compiling collectives/all_reduce.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/all_reduce.o -- The CXX compiler identification is GNU 7.5.0 -- Check for working C compiler: /usr/bin/cc -- Check for working C compiler: /usr/bin/cc -- works -- Detecting C compiler ABI info -- Detecting C compiler ABI info - done -- Detecting C compile features -- Detecting C compile features - done -- Check for working CXX compiler: /usr/bin/c++ Generating nccl.h.in > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/include/nccl.h Grabbing include/nccl_net.h > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/include/nccl_net.h Generating nccl.pc.in > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/lib/pkgconfig/nccl.pc Compiling init.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/init.o Compiling channel.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/channel.o Compiling bootstrap.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/bootstrap.o Compiling transport.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/transport.o Compiling enqueue.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/enqueue.o Compiling group.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/group.o Compiling debug.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/debug.o Compiling proxy.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/proxy.o Compiling misc/nvmlwrap.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/misc/nvmlwrap.o Compiling misc/ibvwrap.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/misc/ibvwrap.o Compiling misc/gdrwrap.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/misc/gdrwrap.o Compiling misc/utils.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/misc/utils.o Compiling misc/argcheck.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/misc/argcheck.o Compiling transport/p2p.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/transport/p2p.o Compiling transport/shm.cc > 
/root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/transport/shm.o Compiling transport/net.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/transport/net.o Compiling transport/net_socket.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/transport/net_socket.o Compiling transport/net_ib.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/transport/net_ib.o Compiling transport/coll_net.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/transport/coll_net.o Compiling collectives/sendrecv.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/sendrecv.o Compiling collectives/all_reduce.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/all_reduce.o Compiling collectives/all_gather.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/all_gather.o Compiling collectives/broadcast.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/broadcast.o Compiling collectives/reduce.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/reduce.o Compiling collectives/all_gather.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/all_gather.o -- Check for working CXX compiler: /usr/bin/c++ -- works Compiling collectives/broadcast.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/broadcast.o -- Detecting CXX compiler ABI info Compiling collectives/reduce.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/reduce.o Compiling collectives/reduce_scatter.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/reduce_scatter.o Compiling graph/topo.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/graph/topo.o Compiling graph/paths.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/graph/paths.o Compiling graph/search.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/graph/search.o Compiling graph/connect.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/graph/connect.o Compiling graph/rings.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/graph/rings.o Compiling graph/trees.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/graph/trees.o Compiling graph/tuning.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/graph/tuning.o Compiling graph/xml.cc > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/graph/xml.o -- Detecting CXX compiler ABI info - done -- Detecting CXX compile features [ 31%] Linking C static library libtensorpipe_uv.a -- Detecting CXX compile features - done -- Using Python interpreter: /opt/conda/envs/pytorch-ci/bin/python [ 31%] Built target tensorpipe_uv Generating rules > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/Makefile.rules -- Check for working CXX compiler: /usr/bin/c++ -- works -- Detecting CXX compiler ABI info -- Detecting CXX compiler ABI info - done -- Detecting CXX compile features -- Detecting CXX compile features - done -- Start configuring project dgl -- Performing Test SUPPORT_CXX14 -- Looking for stddef.h - found -- Check size of unsigned short Compiling collectives/reduce_scatter.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/reduce_scatter.o Compiling graph/topo.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/graph/topo.o Compiling graph/paths.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/graph/paths.o Compiling graph/search.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/graph/search.o Compiling graph/connect.cc > 
/root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/graph/connect.o Compiling graph/rings.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/graph/rings.o Compiling graph/trees.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/graph/trees.o Compiling graph/tuning.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/graph/tuning.o Compiling graph/xml.cc > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/graph/xml.o [ 32%] Linking C static library libtensorpipe_uv.a Generating rules > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/Makefile.rules [ 32%] Built target tensorpipe_uv Scanning dependencies of target tensorpipe [ 31%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/error.cc.o [ 32%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/helpers.cc.o [ 32%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/allocator.cc.o [ 32%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/address.cc.o [ 32%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/error.cc.o [ 32%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/fd.cc.o [ 33%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/system.cc.o [ 33%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/context.cc.o [ 33%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/socket.cc.o [ 33%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/context_impl.cc.o [ 34%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/error.cc.o [ 34%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/listener.cc.o [ 34%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/pipe_impl.cc.o [ 34%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/pipe.cc.o [ 36%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/error.cc.o [ 36%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/listener_impl.cc.o [ 36%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/basic/channel_impl.cc.o [ 36%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/basic/factory.cc.o [ 36%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/xth/channel_impl.cc.o [ 37%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/cma/channel_impl.cc.o [ 37%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/mpt/factory.cc.o [ 37%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/cma/factory.cc.o [ 37%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/cma/context_impl.cc.o [ 38%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/connection_impl.cc.o [ 38%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/mpt/channel_impl.cc.o [ 38%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/factory.cc.o [ 38%] Building 
CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/basic/context_impl.cc.o [ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/shm_segment.cc.o [ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/xth/factory.cc.o [ 40%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/xth/context_impl.cc.o [ 40%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/context_impl.cc.o [ 42%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/utility.cc.o [ 42%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/connection_impl.cc.o [ 42%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/reactor.cc.o [ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/listener_impl.cc.o [ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/listener_impl.cc.o [ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/error.cc.o [ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/factory.cc.o [ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/sockaddr.cc.o [ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/sockaddr.cc.o [ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/connection_impl.cc.o [ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/error.cc.o [ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/listener_impl.cc.o [ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/utility.cc.o [ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/mpt/context_impl.cc.o [ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/context_impl.cc.o [ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/epoll_loop.cc.o [ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/ibv.cc.o [ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/loop.cc.o [ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/reactor.cc.o [ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/context_impl.cc.o [ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/factory.cc.o [ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/sockaddr.cc.o -- Performing Test SUPPORT_CXX14 - Success [ 32%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/error.cc.o [ 32%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/helpers.cc.o [ 33%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/address.cc.o [ 33%] Building CXX object 
third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/allocator.cc.o [ 33%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/error.cc.o [ 33%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/fd.cc.o [ 33%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/socket.cc.o [ 34%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/system.cc.o [ 34%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/context.cc.o [ 34%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/context_impl.cc.o [ 34%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/error.cc.o -- Check size of unsigned short - done -- Searching 16 bit integer - Using unsigned short [ 36%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/listener.cc.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). [ 36%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/listener_impl.cc.o [ 36%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/pipe.cc.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). [ 36%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/pipe_impl.cc.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
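The repeated nvcc warnings above come from the NCCL external build targeting the default architecture list, which still includes compute_35/sm_35 and related targets that newer nvcc releases deprecate. Before pruning -gencode entries (or silencing the message with -Wno-deprecated-gpu-targets, as the warning itself suggests), it can help to confirm which compute capabilities the deployment GPUs actually report. A minimal stand-alone CUDA sketch, not part of the DGL build:

    // check_cc.cu -- print each visible GPU's compute capability, to decide
    // which -gencode arch=compute_XX,code=sm_XX entries are still needed.
    // Build with: nvcc -o check_cc check_cc.cu
    #include <cstdio>
    #include <cuda_runtime.h>

    int main() {
      int n = 0;
      if (cudaGetDeviceCount(&n) != cudaSuccess || n == 0) {
        std::printf("no CUDA devices visible\n");
        return 1;
      }
      for (int i = 0; i < n; ++i) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);  // fills name and SM version
        std::printf("device %d: %s, compute capability %d.%d\n",
                    i, prop.name, prop.major, prop.minor);
      }
      return 0;
    }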
[ 37%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/error.cc.o
In file included from include/core.h:57:0,
                 from include/transport.h:13,
                 from include/comm.h:10,
                 from include/enqueue.h:10,
                 from enqueue.cc:7:
enqueue.cc: In function 'ncclResult_t ncclLaunchCooperativeKernelMultiDevice(cudaLaunchParams*, int*, int, int)':
enqueue.cc:135:97: warning: 'cudaError_t cudaLaunchCooperativeKernelMultiDevice(cudaLaunchParams*, unsigned int, unsigned int)' is deprecated [-Wdeprecated-declarations]
     cudaCooperativeLaunchMultiDeviceNoPreSync|cudaCooperativeLaunchMultiDeviceNoPostSync));
                                                                                          ^
include/checks.h:14:23: note: in definition of macro 'CUDACHECK'
   cudaError_t err = cmd; \
                     ^~~
In file included from /usr/local/cuda/include/channel_descriptor.h:61:0,
                 from /usr/local/cuda/include/cuda_runtime.h:95,
                 from /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/include/nccl.h:10,
                 from include/devcomm.h:10,
                 from include/transport.h:10,
                 from include/comm.h:10,
                 from include/enqueue.h:10,
                 from enqueue.cc:7:
/usr/local/cuda/include/cuda_runtime_api.h:4198:57: note: declared here
 extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaLaunchCooperativeKernelMultiDevice(struct cudaLaunchParams *launchParamsList, unsigned int numDevices, unsigned int flags __dv(0));
                                                         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[ 37%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/basic/channel_impl.cc.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
[ 37%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/basic/context_impl.cc.o
[ 37%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/basic/factory.cc.o
[ 38%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/xth/context_impl.cc.o
[ 38%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/xth/channel_impl.cc.o
-- Found OpenMP_C: -fopenmp (found version "4.5")
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
[ 38%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/xth/factory.cc.o
[ 38%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/cma/channel_impl.cc.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
[ 38%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/cma/context_impl.cc.o
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/cma/factory.cc.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
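The -Wdeprecated-declarations warning above is reported inside NCCL's CUDACHECK macro (include/checks.h:14), which evaluates a CUDA runtime call once into a local cudaError_t and bails out on failure; that single evaluation is the 'cudaError_t err = cmd;' line the compiler note points at. A simplified sketch of the pattern (the real NCCL macro returns ncclUnhandledCudaError rather than a plain exit code):

    // Error-checking macro in the style of NCCL's CUDACHECK (include/checks.h).
    #include <cstdio>
    #include <cuda_runtime.h>

    #define CUDACHECK(cmd) do {                                        \
      cudaError_t err = cmd;       /* evaluate the call exactly once */ \
      if (err != cudaSuccess) {                                        \
        std::fprintf(stderr, "CUDA failure %s:%d '%s'\n",              \
                     __FILE__, __LINE__, cudaGetErrorString(err));     \
        return 1;  /* NCCL returns ncclUnhandledCudaError here */      \
      }                                                                \
    } while (0)

    int main() {
      CUDACHECK(cudaSetDevice(0));  // any runtime call can be wrapped
      return 0;
    }

Because the macro argument is expanded at the call site, a deprecated API used inside CUDACHECK(...) is diagnosed at the macro's 'err = cmd' line, which is exactly what the enqueue.cc:135 warning shows.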
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/mpt/channel_impl.cc.o
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/mpt/factory.cc.o
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/mpt/context_impl.cc.o
-- Found OpenMP_CXX: -fopenmp (found version "4.5")
-- Found OpenMP: TRUE (found version "4.5")
-- Build with OpenMP.
-- Build with LIBXSMM optimization.
-- Looking for sys/epoll.h
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/context_impl.cc.o
[ 40%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/connection_impl.cc.o
[ 40%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/error.cc.o
[ 40%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/factory.cc.o
[ 42%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/listener_impl.cc.o
[ 42%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/loop.cc.o
[ 42%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/sockaddr.cc.o
[ 42%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/utility.cc.o
-- Check if the system is big endian - little endian
-- C:/Jenkins/workspace/dgl_PR-4648/third_party/dmlc-core/cmake/build_config.h.in -> include/dmlc/build_config.h
-- Looking for sys/epoll.h - found
-- Found OpenMP_C: -fopenmp (found version "4.5")
-- Found OpenMP_CXX: -fopenmp (found version "4.5")
-- Looking for clock_gettime in rt
-- Looking for clock_gettime in rt - found
-- Looking for fopen64
[ 42%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/epoll_loop.cc.o
[ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/shm_segment.cc.o
[ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/connection_impl.cc.o
[ 43%] Linking C static library libmetis.a
[ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/context_impl.cc.o
[ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/factory.cc.o
[ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/listener_impl.cc.o
[ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/reactor.cc.o
[ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/sockaddr.cc.o
[ 44%] Built target metis
[ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/ibv.cc.o
-- Found PythonInterp: C:/Users/Administrator/Envs/jenkins-dgl-PR-4648-2/Scripts/python.exe (found version "3.6.7")
-- Build with unittest
CMake Deprecation Warning at third_party/googletest/CMakeLists.txt:4 (cmake_minimum_required):
  Compatibility with CMake < 2.8.12 will be removed from a future version of CMake.

  Update the VERSION argument <min> value or use a <min>...<max> suffix to tell CMake that the project does not need compatibility with older versions.

CMake Deprecation Warning at third_party/googletest/googlemock/CMakeLists.txt:45 (cmake_minimum_required):
  Compatibility with CMake < 2.8.12 will be removed from a future version of CMake.

  Update the VERSION argument <min> value or use a <min>...<max> suffix to tell CMake that the project does not need compatibility with older versions.

CMake Deprecation Warning at third_party/googletest/googletest/CMakeLists.txt:56 (cmake_minimum_required):
  Compatibility with CMake < 2.8.12 will be removed from a future version of CMake.

  Update the VERSION argument <min> value or use a <min>...<max> suffix to tell CMake that the project does not need compatibility with older versions.

-- Looking for pthread.h
-- Looking for fopen64 - not found
-- Looking for C++ include cxxabi.h
[ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/connection_impl.cc.o
[ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/context_impl.cc.o
[ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/error.cc.o
[ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/factory.cc.o
[ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/listener_impl.cc.o
[ 46%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/reactor.cc.o
-- Looking for pthread.h - not found
-- Found Threads: TRUE
-- Configuring done
-- Looking for C++ include cxxabi.h - found
-- Looking for nanosleep
-- Looking for nanosleep - found
-- Looking for backtrace
[ 46%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/sockaddr.cc.o
[ 46%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/utility.cc.o
-- Generating done
-- Build files have been written to: C:/Jenkins/workspace/dgl_PR-4648/build
Microsoft (R) Build Engine version 16.5.1+4616136f8 for .NET Framework
Copyright (C) Microsoft Corporation. All rights reserved.
Build started 9/27/2022 4:42:41 AM.
-- find_cmake.py output: /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/share/cmake;1.9.0
-- Configuring for PyTorch 1.9.0
-- Setting directory to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/share/cmake/Torch
-- Looking for pthread.h
-- Looking for pthread.h - found
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Looking for backtrace - found
-- backtrace facility detected in default set of libraries
-- Found Backtrace: /usr/include
-- Check if the system is big endian
-- Searching 16 bit integer
-- Looking for sys/types.h
1>Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" on node 1 (default targets).
1>ValidateSolutionConfiguration:
  Building solution configuration "Release|x64".
ValidateProjects:
  The project "INSTALL" is not selected for building in solution configuration "Release|x64".
  The project "RUN_TESTS" is not selected for building in solution configuration "Release|x64".
  The project "dmlc_lint" is not selected for building in solution configuration "Release|x64".
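The "Searching 16 bit integer" probe that starts above is METIS/GKlib's configure-time hunt for an exactly-16-bit type; a little further down the log it settles on unsigned short. What the check guarantees can be expressed as a compile-time assertion, sketched here as an illustration rather than the project's actual probe:

    // Compile-time restatement of what 'Searching 16 bit integer -
    // Using unsigned short' establishes at configure time.
    #include <climits>

    static_assert(sizeof(unsigned short) * CHAR_BIT == 16,
                  "configure picked unsigned short as the 16-bit integer");

    int main() { return 0; }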
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed -- Looking for pthread_create in pthreads [ 45%] Linking C static library libmetis.a -- Looking for sys/types.h - found -- Looking for stdint.h 1>Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (1) is building "C:\Jenkins\workspace\dgl_PR-4648\build\ALL_BUILD.vcxproj.metaproj" (2) on node 1 (default targets). 2>Project "C:\Jenkins\workspace\dgl_PR-4648\build\ALL_BUILD.vcxproj.metaproj" (2) is building "C:\Jenkins\workspace\dgl_PR-4648\build\ZERO_CHECK.vcxproj" (11) on node 2 (default targets). 11>PrepareForBuild: Creating directory "x64\Release\ZERO_CHECK\". Creating directory "x64\Release\ZERO_CHECK\ZERO_CHECK.tlog\". InitializeBuildStatus: Creating "x64\Release\ZERO_CHECK\ZERO_CHECK.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified. CustomBuild: Checking Build System FinalizeBuildStatus: Deleting file "x64\Release\ZERO_CHECK\ZERO_CHECK.tlog\unsuccessfulbuild". Touching "x64\Release\ZERO_CHECK\ZERO_CHECK.tlog\ZERO_CHECK.lastbuildstate". 11>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\ZERO_CHECK.vcxproj" (default targets). [ 45%] Built target metis -- Looking for pthread_create in pthreads - not found -- Looking for pthread_create in pthread -- Looking for pthread_create in pthread - found -- Found Threads: TRUE -- Found CUDA: /usr/local/cuda (found version "10.1") -- Caffe2: CUDA detected: 10.1 -- Caffe2: CUDA nvcc is: /usr/local/cuda/bin/nvcc -- Caffe2: CUDA toolkit directory: /usr/local/cuda 1>Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (1) is building "C:\Jenkins\workspace\dgl_PR-4648\build\tensoradapter_pytorch.vcxproj.metaproj" (10) on node 1 (default targets). 10>Project "C:\Jenkins\workspace\dgl_PR-4648\build\tensoradapter_pytorch.vcxproj.metaproj" (10) is building "C:\Jenkins\workspace\dgl_PR-4648\build\tensoradapter_pytorch.vcxproj" (12) on node 2 (default targets). 12>PrepareForBuild: Creating directory "x64\Release\tensoradapter_pytorch\". Creating directory "x64\Release\tensoradapter_pytorch\tensorad.15EA3878.tlog\". InitializeBuildStatus: Creating "x64\Release\tensoradapter_pytorch\tensorad.15EA3878.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified. CustomBuild: (jenkins-dgl-PR-4648-2) administrator@WIN-O7QS55HVSDB C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch>REM Helper script to build tensor adapter libraries for PyTorch Could Not Find C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build -- Selecting Windows SDK version 10.0.18362.0 to target Windows 6.3.9600. 1>Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (1) is building "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googletest\gtest.vcxproj.metaproj" (7) on node 1 (default targets). 7>Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googletest\gtest.vcxproj.metaproj" (7) is building "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googletest\gtest.vcxproj" (13) on node 3 (default targets). 13>PrepareForBuild: Creating directory "gtest.dir\Release\". Creating directory "C:\Jenkins\workspace\dgl_PR-4648\build\lib\Release\". Creating directory "gtest.dir\Release\gtest.tlog\". InitializeBuildStatus: Creating "gtest.dir\Release\gtest.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified. 
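The sequence "Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed", then "Looking for pthread_create in pthreads - not found", then "Looking for pthread_create in pthread - found" above is CMake's FindThreads module compiling a tiny probe program against successive libraries until one links. Assuming the probe resembles CMake's standard one, the idea is roughly:

    // Sketch of the kind of probe behind 'Looking for pthread_create in ...':
    // if this links with no extra flags, pthreads live in libc; otherwise
    // CMake retries with -lpthreads and then -lpthread, as the log shows.
    #include <pthread.h>

    static void* worker(void*) { return nullptr; }

    int main() {
      pthread_t t;
      if (pthread_create(&t, nullptr, worker, nullptr) != 0) return 1;
      pthread_join(t, nullptr);
      return 0;
    }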
-- Looking for stdint.h - found -- Looking for stddef.h -- Looking for stddef.h - found -- Check size of unsigned short -- Check size of unsigned short - done -- Using unsigned short 1>Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (1) is building "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googlemock\gmock_main.vcxproj.metaproj" (6) on node 1 (default targets). 6>Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googlemock\gmock_main.vcxproj.metaproj" (6) is building "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googlemock\gmock_main.vcxproj" (14) on node 4 (default targets). 14>PrepareForBuild: Creating directory "gmock_main.dir\Release\". Creating directory "gmock_main.dir\Release\gmock_main.tlog\". InitializeBuildStatus: Creating "gmock_main.dir\Release\gmock_main.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified. 13>CustomBuild: Building Custom Rule C:/Jenkins/workspace/dgl_PR-4648/third_party/googletest/googletest/CMakeLists.txt 1>Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (1) is building "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googlemock\gmock.vcxproj.metaproj" (5) on node 1 (default targets). 5>Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googlemock\gmock.vcxproj.metaproj" (5) is building "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googlemock\gmock.vcxproj" (15) on node 5 (default targets). 15>PrepareForBuild: Creating directory "gmock.dir\Release\". 13>MakeDirsForCl: Creating directory "C:\Jenkins\workspace\dgl_PR-4648\build\bin\Release". 15>PrepareForBuild: Creating directory "gmock.dir\Release\gmock.tlog\". InitializeBuildStatus: Creating "gmock.dir\Release\gmock.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified. 14>CustomBuild: Building Custom Rule C:/Jenkins/workspace/dgl_PR-4648/third_party/googletest/googlemock/CMakeLists.txt 13>ClCompile: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /Zi /nologo /W4 /WX /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D _UNICODE /D UNICODE /D WIN32 /D _WIN32 /D STRICT /D WIN32_LEAN_AND_MEAN /D GTEST_HAS_PTHREAD=0 /D _HAS_EXCEPTIONS=1 /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D "CMAKE_INTDIR=\"Release\"" /D _UNICODE /D UNICODE /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /Fo"gtest.dir\Release\\" /Fd"C:\Jenkins\workspace\dgl_PR-4648\build\bin\Release\gtest.pdb" /Gd /TP /wd4251 /wd4275 /wd4702 /errorReport:queue -J "C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\src\gtest-all.cc" 1>Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (1) is building "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj.metaproj" (4) on node 1 (default targets). 4>Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj.metaproj" (4) is building "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj" (16) on node 6 (default targets). 16>PrepareForBuild: Creating directory "dmlc.dir\Release\". Creating directory "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\Release\". 
Creating directory "dmlc.dir\Release\dmlc.tlog\". InitializeBuildStatus: Creating "dmlc.dir\Release\dmlc.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified. 15>CustomBuild: Building Custom Rule C:/Jenkins/workspace/dgl_PR-4648/third_party/googletest/googlemock/CMakeLists.txt 13>ClCompile: gtest-all.cc 14>ClCompile: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googlemock\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googlemock" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /Zi /nologo /W4 /WX /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D _UNICODE /D UNICODE /D WIN32 /D _WIN32 /D STRICT /D WIN32_LEAN_AND_MEAN /D GTEST_HAS_PTHREAD=0 /D _HAS_EXCEPTIONS=1 /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D "CMAKE_INTDIR=\"Release\"" /D _UNICODE /D UNICODE /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /Fo"gmock_main.dir\Release\\" /Fd"C:\Jenkins\workspace\dgl_PR-4648\build\bin\Release\gmock_main.pdb" /Gd /TP /wd4251 /wd4275 /wd4702 /errorReport:queue -J "C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\src\gtest-all.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googlemock\src\gmock-all.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googlemock\src\gmock_main.cc" 16>CustomBuild: Building Custom Rule C:/Jenkins/workspace/dgl_PR-4648/third_party/dmlc-core/CMakeLists.txt 15>ClCompile: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googlemock\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googlemock" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /Zi /nologo /W4 /WX /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D _UNICODE /D UNICODE /D WIN32 /D _WIN32 /D STRICT /D WIN32_LEAN_AND_MEAN /D GTEST_HAS_PTHREAD=0 /D _HAS_EXCEPTIONS=1 /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D "CMAKE_INTDIR=\"Release\"" /D _UNICODE /D UNICODE /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /Fo"gmock.dir\Release\\" /Fd"C:\Jenkins\workspace\dgl_PR-4648\build\bin\Release\gmock.pdb" /Gd /TP /wd4251 /wd4275 /wd4702 /errorReport:queue -J "C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\src\gtest-all.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googlemock\src\gmock-all.cc" 14>ClCompile: gtest-all.cc gmock-all.cc gmock_main.cc 15>ClCompile: gtest-all.cc gmock-all.cc 16>ClCompile: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include" /I"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\include" /nologo /W1 /WX- /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D 
REALTYPEWIDTH=32 /D NDEBUG /D DMLC_USE_HDFS=0 /D DMLC_USE_S3=0 /D DMLC_USE_AZURE=0 /D _XOPEN_SOURCE=700 /D _POSIX_SOURCE /D _POSIX_C_SOURCE=200809L /D _DARWIN_C_SOURCE /D __USE_XOPEN2K8 /D DMLC_CORE_USE_CMAKE /D DMLC_USE_CXX11=1 /D WIN32_LEAN_AND_MEAN /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D "CMAKE_INTDIR=\"Release\"" /D _MBCS /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /Fo"dmlc.dir\Release\\" /Fd"dmlc.dir\Release\dmlc.pdb" /Gd /TP /errorReport:queue "C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\config.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\data.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\recordio.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\line_split.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\recordio_split.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\indexed_recordio_split.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\input_split_base.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\filesys.cc" "C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\local_filesys.cc"
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sum_i8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sum_u8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sum_i32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sum_u32.o
-- Check if the system is big endian - little endian
-- /root/jenkins/workspace/dgl_PR-4648/third_party/dmlc-core/cmake/build_config.h.in -> include/dmlc/build_config.h
-- Performing Test SUPPORT_MSSE2
  config.cc
  data.cc
  io.cc
  recordio.cc
  line_split.cc
  recordio_split.cc
  indexed_recordio_split.cc
  input_split_base.cc
  filesys.cc
  local_filesys.cc
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sum_i64.o
-- Caffe2: Header version is: 10.1
-- Found CUDNN: /usr/lib/x86_64-linux-gnu/libcudnn.so
-- Found cuDNN: v7.6.5 (include: /usr/include, library: /usr/lib/x86_64-linux-gnu/libcudnn.so)
CMake Warning at /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/share/cmake/Caffe2/public/cuda.cmake:203 (message):
  Failed to compute shorthash for libnvrtc.so
Call Stack (most recent call first):
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/share/cmake/Caffe2/Caffe2Config.cmake:88 (include)
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:68 (find_package)
  CMakeLists.txt:26 (find_package)
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sum_u64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sum_f16.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sum_f32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sum_f64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_prod_i8.o
-- Performing Test SUPPORT_MSSE2 - Success
-- Looking for execinfo.h
-- Looking for execinfo.h - found
-- Looking for getline
16>C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include\dmlc/filesystem.h(19,1): warning C4005: 'NOMINMAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\indexed_recordio_split.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\indexed_recordio_split.cc : message : see previous definition of 'NOMINMAX' [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
16>C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include\dmlc/filesystem.h(19,1): warning C4005: 'NOMINMAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\recordio_split.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\recordio_split.cc : message : see previous definition of 'NOMINMAX' [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
16>C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include\dmlc/filesystem.h(19,1): warning C4005: 'NOMINMAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\line_split.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\line_split.cc : message : see previous definition of 'NOMINMAX' [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
16>C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include\dmlc/filesystem.h(19,1): warning C4005: 'NOMINMAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\filesys.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\filesys.cc : message : see previous definition of 'NOMINMAX' [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
16>C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include\dmlc/filesystem.h(19,1): warning C4005: 'NOMINMAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\input_split_base.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\input_split_base.cc : message : see previous definition of 'NOMINMAX' [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
16>C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include\dmlc/filesystem.h(19,1): warning C4005: 'NOMINMAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io.cc : message : see previous definition of 'NOMINMAX' [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
12>CustomBuild:
  -- The C compiler identification is MSVC 19.25.28614.0
16>C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include\dmlc/filesystem.h(19,1): warning C4005: 'NOMINMAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\local_filesys.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\local_filesys.cc : message : see previous definition of 'NOMINMAX' [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj]
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_prod_u8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_prod_i32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_prod_u32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_prod_i64.o
-- Automatic GPU detection failed. Building for common architectures.
-- Autodetected CUDA architecture(s): 3.5;5.0;5.2;6.0;6.1;7.0;7.5;7.5+PTX
-- Added CUDA NVCC flags for: -gencode;arch=compute_35,code=sm_35;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_52,code=sm_52;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_75,code=compute_75
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_prod_u64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sum_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sum_u8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sum_i32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sum_u32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sum_i64.o
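The C4005 warnings above are benign: the cl.exe command line already passes /D NOMINMAX, and dmlc/filesystem.h defines the same macro again without checking first, so MSVC reports a redefinition for every translation unit that includes that header. The conventional remedy is to guard the definition; a minimal C++ sketch of that guard (illustrative only, not the literal dmlc-core source):

    // Sketch: guard the macro so a header definition coexists with an
    // identical /D NOMINMAX already given on the compiler command line.
    #ifndef NOMINMAX
    #define NOMINMAX  // keep <windows.h> from defining min()/max() macros
    #endif

    #include <algorithm>

    int main() {
      // With NOMINMAX in effect, std::min and std::max resolve normally.
      return std::min(0, 1);
    }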
-- Looking for getline - found
-- Performing Test UV_LINT_W4
-- Performing Test UV_LINT_W4 - Failed
-- Performing Test UV_LINT_NO_UNUSED_PARAMETER_MSVC
-- Performing Test UV_LINT_NO_UNUSED_PARAMETER_MSVC - Failed
-- Performing Test UV_LINT_NO_CONDITIONAL_CONSTANT_MSVC
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_prod_f16.o
CMake Warning at /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:22 (message):
  static library kineto_LIBRARY-NOTFOUND not found.
Call Stack (most recent call first):
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:127 (append_torchlib_if_found)
  CMakeLists.txt:26 (find_package)
-- Found Torch: /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/lib/libtorch.so
-- tensoradapter found PyTorch includes: /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/include;/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/include/torch/csrc/api/include
-- tensoradapter found PyTorch lib: torch
-- Configured target tensoradapter_pytorch_1.9.0
-- Configuring done
-- Generating done
-- Build files have been written to: /root/jenkins/workspace/dgl_PR-4648@2/tensoradapter/pytorch/build
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_prod_f32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_prod_f64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_min_i8.o
Scanning dependencies of target tensoradapter_pytorch_1.9.0
[ 50%] Building CXX object CMakeFiles/tensoradapter_pytorch_1.9.0.dir/torch.cpp.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_min_u8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sum_u64.o
-- Performing Test UV_LINT_NO_CONDITIONAL_CONSTANT_MSVC - Failed
-- Performing Test UV_LINT_NO_NONSTANDARD_MSVC
-- Performing Test UV_LINT_NO_NONSTANDARD_MSVC - Failed
-- Performing Test UV_LINT_NO_NONSTANDARD_EMPTY_TU_MSVC
-- Performing Test UV_LINT_NO_NONSTANDARD_EMPTY_TU_MSVC - Failed
-- Performing Test UV_LINT_NO_NONSTANDARD_FILE_SCOPE_MSVC
-- Performing Test UV_LINT_NO_NONSTANDARD_FILE_SCOPE_MSVC - Failed
-- Performing Test UV_LINT_NO_NONSTANDARD_NONSTATIC_DLIMPORT_MSVC
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_min_i32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_min_u32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_min_i64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_min_u64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_min_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sum_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sum_f32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sum_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sum_bf16.o
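The nvcc deprecation warning shown once above is emitted for each NCCL compilation unit because the autodetected flag set targets compute_35/sm_35 and sm_50; as the message itself says, -Wno-deprecated-gpu-targets suppresses it. The '-- Added CUDA NVCC flags' line maps each autodetected architecture 'X.Y' to -gencode arch=compute_XY,code=sm_XY, with the trailing '7.5+PTX' entry mapped to code=compute_75 so the binary keeps a PTX fallback for newer GPUs. A short C++ sketch of that mapping rule, inferred from the log output itself (not CMake's actual implementation):

    // Sketch: expand a CMake-style arch list ("3.5;...;7.5+PTX") into the
    // -gencode flags reported above. Separators shown here as spaces; the
    // log prints them as ';' because CMake stores the flags as a list.
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    std::vector<std::string> gencodeFlags(const std::string& archList) {
      std::vector<std::string> flags;
      std::stringstream ss(archList);
      std::string arch;
      while (std::getline(ss, arch, ';')) {
        bool ptx = arch.size() > 4 &&
                   arch.compare(arch.size() - 4, 4, "+PTX") == 0;
        if (ptx) arch.erase(arch.size() - 4);
        std::string digits;
        for (char c : arch)
          if (c != '.') digits += c;  // "7.5" -> "75"
        // Real GPU code: code=sm_XY; PTX fallback: code=compute_XY.
        flags.push_back("-gencode arch=compute_" + digits +
                        ",code=" + (ptx ? "compute_" : "sm_") + digits);
      }
      return flags;
    }

    int main() {
      for (const std::string& f :
           gencodeFlags("3.5;5.0;5.2;6.0;6.1;7.0;7.5;7.5+PTX"))
        std::cout << f << "\n";
    }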
-- Performing Test UV_LINT_NO_NONSTANDARD_NONSTATIC_DLIMPORT_MSVC - Failed
-- Performing Test UV_LINT_NO_HIDES_LOCAL
-- Performing Test UV_LINT_NO_HIDES_LOCAL - Failed
-- Performing Test UV_LINT_NO_HIDES_PARAM
-- Performing Test UV_LINT_NO_HIDES_PARAM - Failed
-- Performing Test UV_LINT_NO_HIDES_GLOBAL
-- Performing Test UV_LINT_NO_HIDES_GLOBAL - Failed
-- Performing Test UV_LINT_NO_CONDITIONAL_ASSIGNMENT_MSVC
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_min_f32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_min_f64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_max_i8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_max_u8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_max_i32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_max_u32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_prod_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_prod_u8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_prod_i32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_prod_u32.o
-- Performing Test UV_LINT_NO_CONDITIONAL_ASSIGNMENT_MSVC - Failed
-- Performing Test UV_LINT_NO_UNSAFE_MSVC
-- Performing Test UV_LINT_NO_UNSAFE_MSVC - Failed
-- Performing Test UV_LINT_WALL
-- Performing Test UV_LINT_WALL - Success
-- Performing Test UV_LINT_NO_UNUSED_PARAMETER
CustomBuild:
  -- The CXX compiler identification is MSVC 19.25.28614.0
-- Detecting C compiler ABI info
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_max_i64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_max_u64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_max_f16.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_max_f32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_max_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_prod_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_prod_u64.o
-- Performing Test UV_LINT_NO_UNUSED_PARAMETER - Success
-- Performing Test UV_LINT_STRICT_PROTOTYPES
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_premulsum_i8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_premulsum_u8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_premulsum_i32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_premulsum_u32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_prod_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_prod_f32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_prod_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_prod_bf16.o
-- Performing Test UV_LINT_STRICT_PROTOTYPES - Success
-- Performing Test UV_LINT_EXTRA
-- Performing Test UV_LINT_EXTRA - Success
-- Performing Test UV_LINT_UTF8_MSVC
-- Performing Test UV_LINT_UTF8_MSVC - Failed
-- Performing Test UV_F_STRICT_ALIASING
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_premulsum_i64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_premulsum_u64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_premulsum_f16.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_premulsum_f32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_premulsum_f64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_min_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_min_u8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_min_i32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_min_u32.o
-- Performing Test UV_F_STRICT_ALIASING - Success
-- summary of build options:
  Install prefix: /usr/local
  Target system: Linux
  Compiler:
    C compiler: /usr/bin/cc
    CFLAGS: -fopenmp -O2 -fPIC -DUSE_AVX -DUSE_LIBXSMM -DDGL_CPU_LLC_SIZE=40000000 -DUSE_EPOLL -DIDXTYPEWIDTH=64 -DREALTYPEWIDTH=32 -DLINUX -D_FILE_OFFSET_BITS=64 -std=c99 -fno-strict-aliasing -march=native -fPIC -Werror -Wno-unused-function -Wno-unused-but-set-variable -Wno-unused-variable -Wno-unknown-pragmas -DNDEBUG -DNDEBUG2 -DHAVE_EXECINFO_H -DHAVE_GETLINE -O3
-- Found uv: 1.38.1 (found version "1.38.1")
-- Build with unittest
-- Found PythonInterp: /usr/bin/python3.6 (found version "3.6.9")
-- Looking for pthread.h
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_u8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_i32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_u32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_min_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_min_u64.o
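In the build-options summary above, -DIDXTYPEWIDTH=64 and -DREALTYPEWIDTH=32 pin the integer and floating-point widths that METIS is compiled with; the 64-bit index type is what lets the partitioner handle graphs with more than 2^31 - 1 edges. To the best of my understanding, metis.h turns these macros into its idx_t/real_t typedefs roughly as follows (a C++ paraphrase, not the verbatim header):

    // Paraphrase of the width selection METIS performs from the CFLAGS above.
    #include <cstdint>

    #define IDXTYPEWIDTH 64   /* from CFLAGS: -DIDXTYPEWIDTH=64 */
    #define REALTYPEWIDTH 32  /* from CFLAGS: -DREALTYPEWIDTH=32 */

    #if IDXTYPEWIDTH == 32
    typedef int32_t idx_t;    /* graph indices fit in 32 bits */
    #elif IDXTYPEWIDTH == 64
    typedef int64_t idx_t;    /* 64-bit indices for very large graphs */
    #else
    #error "Incorrect user-supplied value for IDXTYPEWIDTH"
    #endif

    #if REALTYPEWIDTH == 32
    typedef float real_t;     /* single-precision vertex/edge weights */
    #elif REALTYPEWIDTH == 64
    typedef double real_t;
    #else
    #error "Incorrect user-supplied value for REALTYPEWIDTH"
    #endif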
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_min_f16.o
-- Detecting C compiler ABI info - done
-- Check for working C compiler: C:/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Tools/MSVC/14.25.28610/bin/Hostx64/x64/cl.exe - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Detecting CXX compiler ABI info
-- Looking for pthread.h - found
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success
-- Found Threads: TRUE
-- Building dist/rpc tests
-- Configuring done
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_u64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_f16.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_f32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_f64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sum_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_min_f32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_min_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_min_bf16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_max_i8.o
-- Generating done
-- Build files have been written to: /root/jenkins/workspace/dgl_PR-4648/build
Scanning dependencies of target tensoradapter_pytorch
Scanning dependencies of target gtest
Scanning dependencies of target libxsmm
Scanning dependencies of target dmlc
[ 0%] Building CXX object third_party/googletest/googletest/CMakeFiles/gtest.dir/src/gtest-all.cc.o
[ 0%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io.cc.o
[ 0%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/config.cc.o
[ 1%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/recordio.cc.o
[ 1%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/indexed_recordio_split.cc.o
[ 1%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/recordio_split.cc.o
Scanning dependencies of target tensorpipe_uv
[ 2%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/input_split_base.cc.o
[ 2%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/data.cc.o
[ 2%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/filesys.cc.o
[ 2%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/line_split.cc.o
[ 2%] Building CXX object third_party/dmlc-core/CMakeFiles/dmlc.dir/src/io/local_filesys.cc.o
Scanning dependencies of target metis
[ 3%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/loop-watcher.c.o
[ 3%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/signal.c.o
[ 3%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/random.c.o
[ 3%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/random-devurandom.c.o
[ 3%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/strscpy.c.o
[ 3%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/fs.c.o
[ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/getaddrinfo.c.o
[ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/timer.c.o
[ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/dl.c.o
[ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/async.c.o
[ 4%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/core.c.o
[ 6%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/threadpool.c.o
[ 6%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/procfs-exepath.c.o
[ 6%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/inet.c.o
[ 6%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/fs-poll.c.o
[ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/loop.c.o
[ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/process.c.o
[ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/uv-data-getter-setters.c.o
[ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/linux-core.c.o
[ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/getnameinfo.c.o
[ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/random-sysctl-linux.c.o
[ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/stream.c.o
[ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/poll.c.o
[ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/udp.c.o
[ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/uv-common.c.o
[ 7%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/pipe.c.o
[ 8%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/proctitle.c.o
[ 9%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/idna.c.o
[ 9%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/thread.c.o
[ 11%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/version.c.o
[ 12%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/tcp.c.o
[ 13%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/linux-syscalls.c.o
[ 13%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/tty.c.o
[ 13%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/linux-inotify.c.o
[ 13%] Building C object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe_uv.dir/__/third_party/libuv/src/unix/random-getrandom.c.o
[ 13%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/b64.c.o
[ 13%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/blas.c.o
[ 13%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/csr.c.o
[ 14%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/error.c.o
[ 14%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/gk_util.c.o
[ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/contig.c.o
[ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/evaluate.c.o
[ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/memory.c.o
[ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/gkregex.c.o
[ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/tokenizer.c.o
[ 16%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/fs.c.o
[ 18%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/timers.c.o
[ 18%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/graph.c.o
[ 19%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/getopt.c.o
[ 19%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/string.c.o
[ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/mcore.c.o
[ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/bucketsort.c.o
[ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/fkvkselect.c.o
[ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/kwayfm.c.o
[ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/compress.c.o
[ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/io.c.o
[ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/fm.c.o
[ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/seq.c.o
[ 20%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/checkgraph.c.o
[ 22%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/rw.c.o
[ 22%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/auxapi.c.o
[ 23%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/htable.c.o
[ 23%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/gklib.c.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sum_u8.o
[100%] Linking CXX shared library libtensoradapter_pytorch_1.9.0.so
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sum_i32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sum_u32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sum_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_max_u8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_max_i32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_max_u32.o
[ 23%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/coarsen.c.o
[ 23%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/graph.c.o
[ 23%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/kwayrefine.c.o
[ 23%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mesh.c.o
[ 23%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/debug.c.o
[ 23%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/random.c.o
[ 24%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mcutil.c.o
[ 24%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/sort.c.o
[ 24%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/ometis.c.o
[ 24%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/itemsets.c.o
[ 24%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/options.c.o
[ 24%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/minconn.c.o
[ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/fortran.c.o
[ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/__/GKlib/pqueue.c.o
[ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/frename.c.o
[ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mmd.c.o
[ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/pmetis.c.o
[ 25%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/meshpart.c.o
[ 27%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/initpart.c.o
[ 27%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/kmetis.c.o
[ 27%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/util.c.o
[ 28%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/separator.c.o
[ 28%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/wspace.c.o
[ 28%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/stat.c.o
[ 29%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/parmetis.c.o
[ 29%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/balance.c.o
[ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/timing.c.o
[ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/sfm.c.o
[ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/refine.c.o
[ 30%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/srefine.c.o
[ 32%] Building C object third_party/METIS/libmetis/CMakeFiles/metis.dir/mincover.c.o
-- The C compiler identification is GNU 7.5.0
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sum_u64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sum_f16.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sum_f32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sum_f64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_prod_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_max_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_max_u64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_max_f16.o
-- The CXX compiler identification is GNU 7.5.0
-- Check for working C compiler: /usr/bin/cc
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: C:/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Tools/MSVC/14.25.28610/bin/Hostx64/x64/cl.exe - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Using Python interpreter: C:/Program Files/Python36/python.exe
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_prod_u8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_prod_i32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_prod_u32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_prod_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_max_f32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_max_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_max_bf16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_premulsum_i8.o
-- Check for working C compiler: /usr/bin/cc -- works
-- Detecting C compiler ABI info
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_prod_u64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_prod_f16.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_prod_f32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_prod_f64.o
[100%] Built target tensoradapter_pytorch_1.9.0
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_min_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_premulsum_u8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_premulsum_i32.o
-- Detecting C compiler ABI info - done
-- Detecting C compile features
-- Detecting C compile features - done
-- Check for working CXX compiler: /usr/bin/c++
[ 33%] Linking C static library libtensorpipe_uv.a
-- find_cmake.py output: C:\Program Files\Python36\lib\site-packages\torch\share\cmake;1.9.0
-- Configuring for PyTorch 1.9.0
-- Setting directory to C:\Program Files\Python36\lib\site-packages\torch\share\cmake/Torch
-- Looking for pthread.h
'libtensoradapter_pytorch_1.9.0.so' -> '/root/jenkins/workspace/dgl_PR-4648@2/build/tensoradapter/pytorch/libtensoradapter_pytorch_1.9.0.so'
[ 45%] Built target tensoradapter_pytorch
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_min_u8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_min_i32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_min_u32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_min_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_premulsum_u32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_premulsum_i64.o
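Both workers produce a tensoradapter library whose file name embeds the PyTorch version it was compiled against (libtensoradapter_pytorch_1.9.0.so here, and tensoradapter_pytorch_1.9.0.dll on the Windows node below), which suggests the adapter is resolved at runtime from the installed torch version rather than linked statically. A hypothetical POSIX sketch of that kind of versioned lookup; the naming scheme is read off the log, but the loader code itself is an assumption, not DGL's actual source:

    // Hypothetical sketch of loading a version-suffixed adapter library.
    // Link with -ldl on Linux.
    #include <dlfcn.h>
    #include <cstdio>
    #include <string>

    void* load_tensoradapter(const std::string& torch_version) {
      // e.g. "1.9.0" -> "libtensoradapter_pytorch_1.9.0.so"
      std::string path =
          "libtensoradapter_pytorch_" + torch_version + ".so";
      void* handle = dlopen(path.c_str(), RTLD_NOW | RTLD_LOCAL);
      if (!handle)
        std::fprintf(stderr, "tensoradapter unavailable: %s\n", dlerror());
      return handle;  // a caller could fall back to built-in allocators
    }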
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_premulsum_u64.o
[ 33%] Built target tensorpipe_uv
-- Check for working CXX compiler: /usr/bin/c++ -- works
-- Detecting CXX compiler ABI info
Scanning dependencies of target tensorpipe
[ 34%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/address.cc.o
[ 34%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/helpers.cc.o
[ 34%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/allocator.cc.o
[ 35%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/socket.cc.o
[ 35%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/error.cc.o
[ 35%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/error.cc.o
[ 35%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/system.cc.o
[ 35%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/context.cc.o
[ 35%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/cma/factory.cc.o
[ 37%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/xth/factory.cc.o
[ 37%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/fd.cc.o
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/pipe.cc.o
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/basic/channel_impl.cc.o
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/mpt/channel_impl.cc.o
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/error.cc.o
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/cma/channel_impl.cc.o
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/listener_impl.cc.o
[ 39%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/mpt/context_impl.cc.o
[ 40%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/error.cc.o
[ 41%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/basic/context_impl.cc.o
[ 41%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/xth/channel_impl.cc.o
[ 41%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/context_impl.cc.o
[ 41%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/cma/context_impl.cc.o
[ 41%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/mpt/factory.cc.o
[ 41%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/factory.cc.o
[ 41%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/pipe_impl.cc.o
[ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/listener.cc.o
[ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/listener_impl.cc.o
[ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/ibv.cc.o
[ 43%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/sockaddr.cc.o
[ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/connection_impl.cc.o
[ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/connection_impl.cc.o
[ 44%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/sockaddr.cc.o
[ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/listener_impl.cc.o
[ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/utility.cc.o
[ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/basic/factory.cc.o
[ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/loop.cc.o
[ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/channel/xth/context_impl.cc.o
[ 45%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/reactor.cc.o
[ 46%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/epoll_loop.cc.o
[ 46%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/core/error.cc.o
[ 46%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/context_impl.cc.o
[ 48%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/error.cc.o
[ 48%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/connection_impl.cc.o
[ 48%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/context_impl.cc.o
[ 48%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/uv/context_impl.cc.o
[ 49%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/factory.cc.o
[ 50%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/sockaddr.cc.o
[ 50%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/listener_impl.cc.o
[ 50%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/common/shm_segment.cc.o
[ 50%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/shm/reactor.cc.o
[ 50%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/factory.cc.o
[ 50%] Building CXX object third_party/tensorpipe/tensorpipe/CMakeFiles/tensorpipe.dir/transport/ibv/utility.cc.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_min_u64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_min_f16.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_min_f32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_min_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_premulsum_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_premulsum_f32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_premulsum_f64.o
-- Detecting CXX compiler ABI info - done
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Using Python interpreter: /opt/conda/envs/pytorch-ci/bin/python
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_max_i8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_max_u8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_max_i32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_max_u32.o
-- Looking for pthread.h - not found
-- Found Threads: TRUE
-- Found Torch: C:/Program Files/Python36/Lib/site-packages/torch/lib/torch.lib
-- tensoradapter found PyTorch includes: C:/Program Files/Python36/Lib/site-packages/torch/include;C:/Program Files/Python36/Lib/site-packages/torch/include/torch/csrc/api/include
-- tensoradapter found PyTorch lib: torch
-- Configured target tensoradapter_pytorch_1.9.0
-- Configuring done
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_premulsum_bf16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_u8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_max_i64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_max_u64.o
-- Generating done
12>CUSTOMBUILD : CMake warning : [C:\Jenkins\workspace\dgl_PR-4648\build\tensoradapter_pytorch.vcxproj]
-- Build files have been written to: C:/Jenkins/workspace/dgl_PR-4648/tensoradapter/pytorch/build
  Manually-specified variables were not used by the project:
    CUDA_TOOLKIT_ROOT_DIR
    TORCH_CUDA_ARCH_LIST
Microsoft (R) Build Engine version 16.5.1+4616136f8 for .NET Framework
Copyright (C) Microsoft Corporation. All rights reserved.
Build started 9/27/2022 4:42:46 AM.
1>Project "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\tensoradapter_pytorch.sln" on node 1 (default targets).
1>ValidateSolutionConfiguration:
  Building solution configuration "Release|x64".
ValidateProjects:
  The project "ALL_BUILD" is not selected for building in solution configuration "Release|x64".
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_i32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_max_f16.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_max_f32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_max_f64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_premulsum_i8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_premulsum_i32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_premulsum_u8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_premulsum_u32.o
1>Project "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\tensoradapter_pytorch.sln" (1) is building "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\ZERO_CHECK.vcxproj" (2) on node 1 (default targets).
2>PrepareForBuild:
  Creating directory "x64\Release\ZERO_CHECK\".
  Creating directory "x64\Release\ZERO_CHECK\ZERO_CHECK.tlog\".
InitializeBuildStatus:
  Creating "x64\Release\ZERO_CHECK\ZERO_CHECK.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified.
CustomBuild:
  Checking Build System
FinalizeBuildStatus:
  Deleting file "x64\Release\ZERO_CHECK\ZERO_CHECK.tlog\unsuccessfulbuild".
  Touching "x64\Release\ZERO_CHECK\ZERO_CHECK.tlog\ZERO_CHECK.lastbuildstate".
2>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\ZERO_CHECK.vcxproj" (default targets).
1>Project "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\tensoradapter_pytorch.sln" (1) is building "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\tensoradapter_pytorch_1.9.0.vcxproj.metaproj" (3) on node 1 (default targets).
3>Project "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\tensoradapter_pytorch_1.9.0.vcxproj.metaproj" (3) is building "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\tensoradapter_pytorch_1.9.0.vcxproj" (4) on node 1 (default targets).
4>PrepareForBuild:
  Creating directory "tensoradapter_pytorch_1.9.0.dir\Release\".
  Creating directory "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\Release\".
  Creating directory "tensoradapter_pytorch_1.9.0.dir\Release\tensorad.D2CCE92A.tlog\".
InitializeBuildStatus:
  Creating "tensoradapter_pytorch_1.9.0.dir\Release\tensorad.D2CCE92A.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified.
CustomBuild:
  Building Custom Rule C:/Jenkins/workspace/dgl_PR-4648/tensoradapter/pytorch/CMakeLists.txt
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_u32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_u64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_f32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_bf16.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_premulsum_i64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_premulsum_u64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_premulsum_f16.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_premulsum_f32.o
ClCompile:
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\..\include" /I"C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\..\..\third_party\dlpack\include" /I"C:\Program Files\Python36\Lib\site-packages\torch\include" /I"C:\Program Files\Python36\Lib\site-packages\torch\include\torch\csrc\api\include" /nologo /W3 /WX- /diagnostics:column /O2 /Ob2 /D WIN32 /D _WINDOWS /D NDEBUG /D NOMINMAX /D "CMAKE_INTDIR=\"Release\"" /D tensoradapter_pytorch_1_9_0_EXPORTS /D _WINDLL /D _MBCS /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /GR /std:c++14 /Fo"tensoradapter_pytorch_1.9.0.dir\Release\\" /Fd"tensoradapter_pytorch_1.9.0.dir\Release\vc142.pdb" /Gd /TP /wd4267 /wd4251 /wd4522 /wd4838 /wd4305 /wd4244 /wd4190 /wd4101 /wd4996 /wd4275 /errorReport:queue /bigobj "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\torch.cpp"
  torch.cpp
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sum_i8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sum_u8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_premulsum_f64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sumpostdiv_i8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sumpostdiv_u8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sum_i32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sum_u32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sum_i64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sum_u64.o
[ 50%] Linking C static library libmetis.a
[ 50%] Built target metis
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sumpostdiv_i32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sumpostdiv_u32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sumpostdiv_i64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sum_f16.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sum_f32.o
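The libmetis.a built here, together with the /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 defines that appear in the dgl compile commands later in this log, fixes METIS's integer and real widths at configure time; metis.h selects its idx_t from that macro, roughly as in this sketch (paraphrased from the metis.h convention, not copied from the vendored header):

    #include <cstdint>

    // Compile with -DIDXTYPEWIDTH=64 (as this build does) so METIS
    // partitions graphs with 64-bit vertex/edge ids, matching DGL's
    // int64 id arrays; the default branch gives 32-bit ids.
    #if IDXTYPEWIDTH == 64
    typedef std::int64_t idx_t;
    #else
    typedef std::int32_t idx_t;
    #endif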
Link: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\link.exe /ERRORREPORT:QUEUE /OUT:"C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\Release\tensoradapter_pytorch_1.9.0.dll" /INCREMENTAL:NO /NOLOGO "C:\Program Files\Python36\Lib\site-packages\torch\lib\torch.lib" "C:\Program Files\Python36\Lib\site-packages\torch\lib\torch_cpu.lib" "C:\Program Files\Python36\Lib\site-packages\torch\lib\c10.lib" kernel32.lib user32.lib gdi32.lib winspool.lib shell32.lib ole32.lib oleaut32.lib uuid.lib comdlg32.lib advapi32.lib /MANIFEST /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /manifest:embed /PDB:"C:/Jenkins/workspace/dgl_PR-4648/tensoradapter/pytorch/build/Release/tensoradapter_pytorch_1.9.0.pdb" /SUBSYSTEM:CONSOLE /TLBID:1 /DYNAMICBASE /NXCOMPAT /IMPLIB:"C:/Jenkins/workspace/dgl_PR-4648/tensoradapter/pytorch/build/Release/tensoradapter_pytorch_1.9.0.lib" /MACHINE:X64 /machine:x64 /DLL tensoradapter_pytorch_1.9.0.dir\Release\torch.obj
Creating library C:/Jenkins/workspace/dgl_PR-4648/tensoradapter/pytorch/build/Release/tensoradapter_pytorch_1.9.0.lib and object C:/Jenkins/workspace/dgl_PR-4648/tensoradapter/pytorch/build/Release/tensoradapter_pytorch_1.9.0.exp
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sum_f64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sum_bf16.o
tensoradapter_pytorch_1.9.0.vcxproj -> C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\Release\tensoradapter_pytorch_1.9.0.dll
FinalizeBuildStatus: Deleting file "tensoradapter_pytorch_1.9.0.dir\Release\tensorad.D2CCE92A.tlog\unsuccessfulbuild". Touching "tensoradapter_pytorch_1.9.0.dir\Release\tensorad.D2CCE92A.tlog\tensoradapter_pytorch_1.9.0.lastbuildstate".
4>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\tensoradapter_pytorch_1.9.0.vcxproj" (default targets).
3>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\tensoradapter_pytorch_1.9.0.vcxproj.metaproj" (default targets).
1>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\pytorch\build\tensoradapter_pytorch.sln" (default targets).
Build succeeded.
0 Warning(s)
0 Error(s)
Time Elapsed 00:00:01.29
Release\tensoradapter_pytorch_1.9.0.dll
1 file(s) copied.
Building Custom Rule C:/Jenkins/workspace/dgl_PR-4648/CMakeLists.txt
FinalizeBuildStatus: Deleting file "x64\Release\tensoradapter_pytorch\tensorad.15EA3878.tlog\unsuccessfulbuild". Touching "x64\Release\tensoradapter_pytorch\tensorad.15EA3878.tlog\tensoradapter_pytorch.lastbuildstate".
12>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\tensoradapter_pytorch.vcxproj" (default targets).
10>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\tensoradapter_pytorch.vcxproj.metaproj" (default targets).
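The link step above produces a per-PyTorch-version adapter DLL plus its import library; DGL loads such adapters at runtime (src/runtime/tensordispatch.cc is compiled further down in this log). A generic Win32 sketch of that load-and-resolve pattern, with a hypothetical entry-point name rather than DGL's real symbols:

    #include <windows.h>
    #include <cstdio>

    int main() {
      // Load the version-matched adapter that the build just produced.
      HMODULE h = LoadLibraryA("tensoradapter_pytorch_1.9.0.dll");
      if (!h) { std::fprintf(stderr, "adapter not found\n"); return 1; }
      // Resolve a hypothetical exported symbol; real symbol names differ.
      FARPROC fn = GetProcAddress(h, "RawAlloc");
      std::printf("entry point %s\n", fn ? "resolved" : "missing");
      FreeLibrary(h);
      return 0;
    }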
16>Lib: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\Lib.exe /OUT:"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\Release\dmlc.lib" /NOLOGO /MACHINE:X64 /machine:x64 dmlc.dir\Release\config.obj dmlc.dir\Release\data.obj dmlc.dir\Release\io.obj dmlc.dir\Release\recordio.obj dmlc.dir\Release\line_split.obj dmlc.dir\Release\recordio_split.obj dmlc.dir\Release\indexed_recordio_split.obj dmlc.dir\Release\input_split_base.obj dmlc.dir\Release\filesys.obj dmlc.dir\Release\local_filesys.obj
13>Lib: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\Lib.exe /OUT:"C:\Jenkins\workspace\dgl_PR-4648\build\lib\Release\gtest.lib" /NOLOGO /MACHINE:X64 /machine:x64 "gtest.dir\Release\gtest-all.obj"
15>Lib: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\Lib.exe /OUT:"C:\Jenkins\workspace\dgl_PR-4648\build\lib\Release\gmock.lib" /NOLOGO /MACHINE:X64 /machine:x64 "gmock.dir\Release\gtest-all.obj" "gmock.dir\Release\gmock-all.obj"
14>Lib: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\Lib.exe /OUT:"C:\Jenkins\workspace\dgl_PR-4648\build\lib\Release\gmock_main.lib" /NOLOGO /MACHINE:X64 /machine:x64 "gmock_main.dir\Release\gtest-all.obj" "gmock_main.dir\Release\gmock-all.obj" gmock_main.dir\Release\gmock_main.obj
13>Lib: gtest.vcxproj -> C:\Jenkins\workspace\dgl_PR-4648\build\lib\Release\gtest.lib
15>Lib: gmock.vcxproj -> C:\Jenkins\workspace\dgl_PR-4648\build\lib\Release\gmock.lib
14>Lib: gmock_main.vcxproj -> C:\Jenkins\workspace\dgl_PR-4648\build\lib\Release\gmock_main.lib
16>Lib: dmlc.vcxproj -> C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\Release\dmlc.lib
13>FinalizeBuildStatus: Deleting file "gtest.dir\Release\gtest.tlog\unsuccessfulbuild". Touching "gtest.dir\Release\gtest.tlog\gtest.lastbuildstate".
13>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googletest\gtest.vcxproj" (default targets).
7>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googletest\gtest.vcxproj.metaproj" (default targets).
14>FinalizeBuildStatus: Deleting file "gmock_main.dir\Release\gmock_main.tlog\unsuccessfulbuild".
15>FinalizeBuildStatus: Deleting file "gmock.dir\Release\gmock.tlog\unsuccessfulbuild".
14>FinalizeBuildStatus: Touching "gmock_main.dir\Release\gmock_main.tlog\gmock_main.lastbuildstate".
15>FinalizeBuildStatus: Touching "gmock.dir\Release\gmock.tlog\gmock.lastbuildstate".
14>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googlemock\gmock_main.vcxproj" (default targets).
15>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googlemock\gmock.vcxproj" (default targets).
16>FinalizeBuildStatus: Deleting file "dmlc.dir\Release\dmlc.tlog\unsuccessfulbuild". Touching "dmlc.dir\Release\dmlc.tlog\dmlc.lastbuildstate".
16>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj" (default targets).
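The gtest, gmock, gmock_main, and gtest_main static libraries built above back DGL's C++ unit tests. A minimal test unit that links against gtest and gtest_main looks like the sketch below (the test itself is illustrative, not one of DGL's):

    #include <gtest/gtest.h>

    // gtest_main.lib supplies main(), so a test translation unit
    // only needs to define TEST cases.
    TEST(SmokeTest, Arithmetic) {
      EXPECT_EQ(2 + 2, 4);
    }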
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sumpostdiv_u64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sumpostdiv_f16.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sumpostdiv_f32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/all_gather_sumpostdiv_f64.o
-- find_cmake.py output: /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/share/cmake;1.9.0
-- Configuring for PyTorch 1.9.0
-- Setting directory to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/share/cmake/Torch
-- Looking for pthread.h
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_prod_i8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_prod_u8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_prod_i32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_prod_u32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_prod_i64.o
6>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googlemock\gmock_main.vcxproj.metaproj" (default targets).
5>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googlemock\gmock.vcxproj.metaproj" (default targets).
4>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj.metaproj" (default targets).
1>Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (1) is building "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googletest\gtest_main.vcxproj.metaproj" (8) on node 1 (default targets).
8>Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googletest\gtest_main.vcxproj.metaproj" (8) is building "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googletest\gtest_main.vcxproj" (17) on node 1 (default targets).
17>PrepareForBuild: Creating directory "gtest_main.dir\Release\". Creating directory "gtest_main.dir\Release\gtest_main.tlog\".
InitializeBuildStatus: Creating "gtest_main.dir\Release\gtest_main.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified.
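The long runs of per-dtype NCCL objects above (all_gather_premulsum_f64.o, all_gather_sumpostdiv_i8.o, and so on) come from instantiating each collective once per (reduction op, element type) pair and compiling every instantiation into its own object file. A toy sketch of that instantiation pattern, not NCCL's actual code:

    // One functor per reduction op; one instantiation per element type.
    template <typename T> struct Sum  { T operator()(T a, T b) const { return a + b; } };
    template <typename T> struct Prod { T operator()(T a, T b) const { return a * b; } };

    template <template <typename> class Op, typename T>
    void ReduceChunk(T* acc, const T* incoming, int n) {
      Op<T> op;
      for (int i = 0; i < n; ++i) acc[i] = op(acc[i], incoming[i]);
    }

    // e.g. an all_reduce_sum_f32 unit corresponds to ReduceChunk<Sum, float>,
    // an all_reduce_prod_i64 unit to ReduceChunk<Prod, long long>.
    template void ReduceChunk<Sum, float>(float*, const float*, int);
    template void ReduceChunk<Prod, long long>(long long*, const long long*, int);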
CustomBuild: Building Custom Rule C:/Jenkins/workspace/dgl_PR-4648/third_party/googletest/googletest/CMakeLists.txt
ClCompile: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /Zi /nologo /W4 /WX /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D _UNICODE /D UNICODE /D WIN32 /D _WIN32 /D STRICT /D WIN32_LEAN_AND_MEAN /D GTEST_HAS_PTHREAD=0 /D _HAS_EXCEPTIONS=1 /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D "CMAKE_INTDIR=\"Release\"" /D _UNICODE /D UNICODE /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /Fo"gtest_main.dir\Release\\" /Fd"C:\Jenkins\workspace\dgl_PR-4648\build\bin\Release\gtest_main.pdb" /Gd /TP /wd4251 /wd4275 /wd4702 /errorReport:queue -J "C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\src\gtest_main.cc"
1>Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (1) is building "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj.metaproj" (3) on node 1 (default targets).
3>Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj.metaproj" (3) is building "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj" (18) on node 1 (default targets).
18>PrepareForBuild: Creating directory "C:\Jenkins\workspace\dgl_PR-4648\build\Release\". Creating directory "dgl.dir\Release\dgl.tlog\".
InitializeBuildStatus: Creating "dgl.dir\Release\dgl.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified.
17>ClCompile: gtest_main.cc
18>CustomBuild: Building Custom Rule C:/Jenkins/workspace/dgl_PR-4648/CMakeLists.txt
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sum_i8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sum_u8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sum_i32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sum_u32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sum_i64.o
-- Looking for pthread.h - found
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_prod_u64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_prod_f16.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
MakeDirsForCl: Creating directory "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.dir\Release\src\array\cpu". Creating directory "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.dir\Release\src\graph\transform\cpu".
ClCompile: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dlpack\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\phmap" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\xbyak" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include" /I"C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\nanoflann\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\libxsmm\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /I"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\include" /nologo /W1 /WX- /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D WIN32_LEAN_AND_MEAN /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D __USE_XOPEN2K8 /D DMLC_CORE_USE_CMAKE /D DMLC_USE_CXX11=1 /D "CMAKE_INTDIR=\"Release\"" /D dgl_EXPORTS /D _WINDLL /D _MBCS /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /std:c++14 /Fo"dgl.dir\Release\\" /Fd"dgl.dir\Release\vc142.pdb" /Gd /TP /errorReport:queue "C:\Jenkins\workspace\dgl_PR-4648\src\array\array.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\array_arith.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_cumsum.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_index_select.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_nonzero.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_op_impl.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_pack.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_repeat.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_scatter.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_sort.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_coalesce.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_linegraph.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_remove.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_sort.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_get_data.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_mm.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_remove.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_sort.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_sum.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_to_simple.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_union.cc"
"C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\disjoint_union.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\gather_mm.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\negative_sampling.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\rowwise_sampling.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\rowwise_topk.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\sddmm.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\segment_reduce.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_csr.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmm.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\filter.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\kernel.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\array\uvm_array.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\bcast.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\c_api_common.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\geometry\cpu\geometry_op_impl.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\geometry\geometry.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\partition\ndarray_partition.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\random\random.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\c_object_api.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\c_runtime_api.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\config.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\cpu_device_api.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\dlpack_convert.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\dso_module.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\file_util.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\module.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\module_util.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\ndarray.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\object.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\registry.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\resource_manager.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\semaphore_wrapper.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\shared_mem.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\system_lib_module.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\tensordispatch.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\thread_pool.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\threading_backend.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\utils.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\runtime\workspace_pool.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\api\api_container.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\api\api_test.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\creators.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\gk_ops.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_apis.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_op.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_traversal.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\heterograph.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\heterograph_capi.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\immutable_graph.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\network.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\nodeflow.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\pickle.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampler.cc" 
"C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\negative\global_uniform.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\neighbor\neighbor.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\get_node_types_cpu.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\node2vec.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\node2vec_cpu.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\randomwalk_cpu.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\randomwalk_with_restart_cpu.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\randomwalks.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\dglgraph_serialize.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\graph_serialize.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\heterograph_serialize.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\tensor_serialize.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\zerocopy_serializer.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\shared_mem_manager.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\subgraph.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\compact.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\line_graph.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\metis_partition_hetero.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\partition_hetero.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\remove_edges.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\to_bipartite.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\to_simple.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\graph\unit_graph.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\scheduler\scheduler.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\scheduler\scheduler_apis.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\rpc\network\common.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\rpc\network\msg_queue.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\rpc\network\socket_communicator.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\rpc\network\socket_pool.cc" "C:\Jenkins\workspace\dgl_PR-4648\src\rpc\network\tcp_socket.cc" array.cc array_arith.cc array_cumsum.cc array_index_select.cc array_nonzero.cc array_op_impl.cc array_pack.cc array_repeat.cc array_scatter.cc array_sort.cc coo_coalesce.cc coo_linegraph.cc coo_remove.cc coo_sort.cc csr_get_data.cc csr_mm.cc csr_remove.cc csr_sort.cc csr_sum.cc csr_to_simple.cc csr_union.cc disjoint_union.cc gather_mm.cc negative_sampling.cc rowwise_sampling.cc rowwise_topk.cc sddmm.cc segment_reduce.cc spmat_op_impl_coo.cc spmat_op_impl_csr.cc spmm.cc filter.cc Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sum_u64.o Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sum_f16.o Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sum_f32.o Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sum_f64.o -- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed -- Looking for pthread_create in pthreads nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_prod_f32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_prod_f64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_prod_i8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_prod_u8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_prod_i32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_prod_u32.o
-- Looking for pthread_create in pthreads - not found
-- Looking for pthread_create in pthread
-- Looking for pthread_create in pthread - found
-- Found Threads: TRUE
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_prod_bf16.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_min_i8.o
17>Lib: C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\Lib.exe /OUT:"C:\Jenkins\workspace\dgl_PR-4648\build\lib\Release\gtest_main.lib" /NOLOGO /MACHINE:X64 /machine:x64 gtest_main.dir\Release\gtest_main.obj
gtest_main.vcxproj -> C:\Jenkins\workspace\dgl_PR-4648\build\lib\Release\gtest_main.lib
FinalizeBuildStatus: Deleting file "gtest_main.dir\Release\gtest_main.tlog\unsuccessfulbuild". Touching "gtest_main.dir\Release\gtest_main.tlog\gtest_main.lastbuildstate".
17>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googletest\gtest_main.vcxproj" (default targets).
8>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\third_party\googletest\googletest\gtest_main.vcxproj.metaproj" (default targets).
CMake Warning at /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:22 (message):
  static library kineto_LIBRARY-NOTFOUND not found.
Call Stack (most recent call first):
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:127 (append_torchlib_if_found)
  CMakeLists.txt:26 (find_package)
-- Found Torch: /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/lib/libtorch.so
-- tensoradapter found PyTorch includes: /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/include;/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/include/torch/csrc/api/include
-- tensoradapter found PyTorch lib: torch
-- Configured target tensoradapter_pytorch_1.9.0
-- Configuring done
-- Generating done
CMake Warning:
  Manually-specified variables were not used by the project:
    CUDA_TOOLKIT_ROOT_DIR
    TORCH_CUDA_ARCH_LIST
-- Build files have been written to: /root/jenkins/workspace/dgl_PR-4648/tensoradapter/pytorch/build
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_prod_i64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_prod_u64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_prod_f16.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_min_u8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_min_i32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_min_u32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_min_i64.o
Scanning dependencies of target tensoradapter_pytorch_1.9.0
[ 50%] Building CXX object CMakeFiles/tensoradapter_pytorch_1.9.0.dir/torch.cpp.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_prod_f32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_prod_f64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_min_i8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_min_u8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_min_i32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_min_u32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_min_i64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_min_u64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_min_f16.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_min_f32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_min_f64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_min_bf16.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_max_i8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_min_u64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_min_f16.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_min_f32.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_max_u8.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_max_i32.o
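With TorchConfig.cmake resolved, the tensoradapter configure step records the PyTorch include directories and the torch library shown above. A minimal program that consumes exactly those exported settings (illustrative only; tensoradapter's real torch.cpp does more than this):

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      // Allocates through libtorch, the library the Found Torch line points at.
      torch::Tensor t = torch::ones({2, 3});
      std::cout << t.sum().item<float>() << "\n";  // prints 6
      return 0;
    }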
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_max_u32.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_nonzero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_nonzero.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy<std::_Vector_const_iterator<std::_Vector_val<std::_Simple_types<int64_t>>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator<std::_Vector_val<std::_Simple_types<int64_t>>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_nonzero.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray<int64_t>(const std::vector<int64_t> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_nonzero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_cumsum.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_index_select.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_scatter.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_coalesce.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_sort.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_repeat.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_linegraph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
kernel.cc
libra_partition.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_pack.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_op_impl.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\gather_mm.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_sort.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_to_simple.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_max_i64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_max_u64.o
[ 48%] Linking CXX static library ../../../lib/libgtest.a
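Every C4244 above reports the same template chain: dgl::aten::VecToIdArray copies a std::vector<int64_t> of ids into a freshly allocated IdArray, and when the requested id width is 32 bits each 64-bit element is narrowed. A standalone sketch (not DGL's code) that reproduces the warning under MSVC:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<std::int64_t> ids = {1, 2, 3};   // 64-bit ids, as in VecToIdArray
      std::vector<std::int32_t> out(ids.size());   // 32-bit destination array
      // MSVC flags this std::copy with C4244: each const __int64 element
      // is narrowed to int32_t, with possible loss of data.
      std::copy(ids.begin(), ids.end(), out.begin());
      return static_cast<int>(out[0]);
    }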
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_{min_f64,max_i8,max_u8,max_i32,max_u32}.o
18>(same C4244 warning and _Copy_unchecked / std::copy / VecToIdArray instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_union.cc)
uvm_array.cc
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\disjoint_union.cc)
bcast.cc
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_sort.cc)
c_api_common.cc
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\rowwise_topk.cc)
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_max_{i64,u64,f16}.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_max_f16.o
[ 48%] Built target gtest
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_max_f32.o
[ 49%] Building CXX object third_party/googletest/googlemock/CMakeFiles/gmock.dir/src/gmock-all.cc.o
[100%] Linking CXX shared library libtensoradapter_pytorch_1.9.0.so
geometry_op_impl.cc
geometry.cc
ndarray_partition.cc
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\segment_reduce.cc)
choice.cc
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\negative_sampling.cc)
random.cc
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\filter.cc)
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_{max_f32,max_f64,premulsum_i8,premulsum_u8}.o
[ 49%] Building CXX object third_party/googletest/googletest/CMakeFiles/gtest_main.dir/src/gtest_main.cc.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_max_f64.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_max_bf16.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_premulsum_i8.o
================================================================================
LIBXSMM master-1.16.1-1534 (Linux@4e33dd6e703e)
--------------------------------------------------------------------------------
GNU Compiler Collection: gcc 7.5.0, and g++ 7.5.0
C / C++ target: -msse4.2
Fortran Compiler is disabled or missing: no Fortran interface is built!
--------------------------------------------------------------------------------
--- LIBXSMM build log
[100%] Built target tensoradapter_pytorch_1.9.0
'libtensoradapter_pytorch_1.9.0.so' -> '/root/jenkins/workspace/dgl_PR-4648/build/tensoradapter/pytorch/libtensoradapter_pytorch_1.9.0.so'
[ 50%] Built target tensoradapter_pytorch
c_object_api.cc
c_runtime_api.cc
config.cc
cpu_device_api.cc
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\rowwise_sampling.cc)
dlpack_convert.cc
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\array_arith.cc)
dso_module.cc
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_premulsum_{i32,u32,i64}.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_premulsum_{u8,i32}.o
file_util.cc
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_get_data.cc)
module.cc
module_util.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_remove.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj], with the same _Copy_unchecked / std::copy / VecToIdArray instantiation chain as above
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(151,1): warning C4267: 'initializing': conversion from 'size_t' to '_Ty2', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _Ty2=int32_t ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_remove.cc)
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_utils.h(57): message : see reference to function template instantiation 'std::pair<const K,V>::pair(_Other1,_Other2 &&) noexcept' being compiled (reported twice) with [ K=int32_t, V=int32_t, IdType=int32_t, _Other1=const int32_t &, _Other2=size_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_utils.h(50): message : while compiling class template member function 'void dgl::aten::IdHashMap<IdType>::Update(dgl::IdArray)' with [ IdType=int32_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_utils.h(38): message : see reference to function template instantiation 'void dgl::aten::IdHashMap<IdType>::Update(dgl::IdArray)' being compiled with [ IdType=int32_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_remove.cc(62): message : see reference to class template instantiation 'dgl::aten::IdHashMap<IdType>' being compiled with [ IdType=int32_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_remove.cc(89): message : see reference to function template instantiation 'void dgl::aten::impl::`anonymous-namespace'::COORemoveShuffled(dgl::aten::COOMatrix,dgl::IdArray,std::vector<IdType,std::allocator<IdType>> *,std::vector<IdType,std::allocator<IdType>> *,std::vector<IdType,std::allocator<IdType>> *)' being compiled with [ IdType=int32_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_remove.cc(101): message : see reference to function template instantiation 'dgl::aten::COOMatrix dgl::aten::impl::COORemove(dgl::aten::COOMatrix,dgl::IdArray)' being compiled
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_remove.cc)
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_premulsum_{u64,f16,f32}.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_premulsum_u32.o
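The C4267 instances are a second, distinct narrowing: a size_t (64-bit on x64) initializing the int32_t value of a map entry, which the chain attributes to IdHashMap::Update storing what looks like a vector index. A hedged minimal repro; the map shape and loop are assumptions for illustration, not DGL's actual code:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    // Minimal repro of warning C4267: a size_t index used to initialize the
    // int32_t mapped value inside std::pair construction (MSVC utility(151)).
    int main() {
      std::vector<int32_t> ids = {10, 20, 30};
      std::unordered_map<int32_t, int32_t> old_to_new;
      for (size_t i = 0; i < ids.size(); ++i)
        old_to_new.emplace(ids[i], i);  // warning C4267: size_t -> int32_t
      return 0;
    }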
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_premulsum_{i64,u64}.o
18>(same C4267 warning and IdHashMap instantiation chain as above, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_remove.cc; here the chain ends at csr_remove.cc(63) 'dgl::aten::IdHashMap<IdType>', csr_remove.cc(94) 'void dgl::aten::impl::`anonymous-namespace'::CSRRemoveShuffled(dgl::aten::CSRMatrix,dgl::IdArray,std::vector<IdType,std::allocator<IdType>> *,std::vector<IdType,std::allocator<IdType>> *,std::vector<IdType,std::allocator<IdType>> *)' and csr_remove.cc(106) 'dgl::aten::CSRMatrix dgl::aten::impl::CSRRemove(dgl::aten::CSRMatrix,dgl::IdArray)', all with [ IdType=int32_t ])
18>(same C4244 warning and _Copy_unchecked / std::copy / VecToIdArray instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_mm.cc)
ndarray.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(4088,24): warning C4244: '=': conversion from 'const _Ty' to 'IdType', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _Ty=int64_t ] and [ IdType=int32_t ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_csr.cc)
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_csr.cc(298): message : see reference to function template instantiation 'void std::fill(const _FwdIt,const _FwdIt,const _Ty &)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ IdType=int32_t, _FwdIt=int32_t *, _Ty=int64_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_csr.cc(306): message : see reference to function template instantiation 'dgl::aten::COOMatrix dgl::aten::impl::CSRToCOO(dgl::aten::CSRMatrix)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>(same C4244 warning and _Copy_unchecked / std::copy / VecToIdArray chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_sum.cc)
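All of these C4244/C4267 hits share one fix pattern: make the narrowing explicit where the code has already established that the values fit in 32 bits. A sketch of the idea, not the change DGL actually made in this PR:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Explicit narrowing: std::transform with a static_cast keeps the intent
    // visible and silences C4244 without changing behavior for in-range ids.
    void CopyToInt32(const std::vector<int64_t>& vec, int32_t* out) {
      std::transform(vec.begin(), vec.end(), out,
                     [](int64_t v) { return static_cast<int32_t>(v); });
    }

The same applies to the std::fill case above: filling an int32_t* range with an int64_t value warns, while filling it with static_cast<int32_t>(value) does not.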
object.cc
18>(same C4267 warning and IdHashMap instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_csr.cc; here the chain ends at spmat_op_impl_csr.cc(477) 'dgl::aten::IdHashMap<IdType>' and spmat_op_impl_csr.cc(525) 'dgl::aten::CSRMatrix dgl::aten::impl::CSRSliceMatrix(dgl::aten::CSRMatrix,dgl::runtime::NDArray,dgl::runtime::NDArray)', with [ IdType=int32_t ])
18>(same C4244 warning and _Copy_unchecked / std::copy / VecToIdArray instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\array.cc)
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_{premulsum_f64,sumpostdiv_i8,sumpostdiv_u8}.o
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_premulsum_{f16,f32}.o
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_csr.cc)
registry.cc
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmm.cc)
18>(same C4244 warning and instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\sddmm.cc)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(151,1): warning C4267: 'initializing': conversion from 'size_t' to '_Ty2', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _Ty2=int32_t ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc)
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_utils.h(57): message : see reference to function template instantiation 'std::pair<const K,V>::pair(_Other1,_Other2 &&) noexcept' being compiled (reported twice) with [ IdType=int32_t, _Other1=const int32_t &, _Other2=size_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_utils.h(50): message : while compiling class template member function 'void dgl::aten::IdHashMap<IdType>::Update(dgl::IdArray)' with [ IdType=int32_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_utils.h(38): message : see reference to function template instantiation 'void dgl::aten::IdHashMap<IdType>::Update(dgl::IdArray)' being compiled with [ IdType=int32_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc(684): message : see reference to class template instantiation 'dgl::aten::IdHashMap<IdType>' being compiled with [ IdType=int32_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc(706): message : see reference to function template instantiation 'dgl::aten::COOMatrix dgl::aten::impl::COOSliceRows(dgl::aten::COOMatrix,dgl::runtime::NDArray)' being compiled
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_premulsum_{f64,bf16}.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sumpostdiv_{i32,u32,i64}.o
18>(same C4244 warning and _Copy_unchecked / std::copy / VecToIdArray instantiation chain, compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\numeric(817,20): warning C4244: '=': conversion from '_Ty' to 'IdType', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _Ty=int64_t ] and [ IdType=int32_t ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc)
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc(383): message : see reference to function template instantiation 'void std::iota(_FwdIt,_FwdIt,_Ty)' being compiled with [ IdType=int32_t, _FwdIt=int32_t *, _Ty=int64_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc(632): message : see reference to function template instantiation 'dgl::aten::CSRMatrix dgl::aten::impl::`anonymous-namespace'::SortedCOOToCSR(const dgl::aten::COOMatrix &)' being compiled with [ IdType=int32_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc(635): message : see reference to function template instantiation 'dgl::aten::CSRMatrix dgl::aten::impl::COOToCSR(dgl::aten::COOMatrix)' being compiled
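The numeric(817) hit is the same int64-to-int32 narrowing routed through std::iota: seeding an int32_t range with a 64-bit start value, which the chain attributes to SortedCOOToCSR's row-pointer construction. A minimal sketch of the pattern; the variable names are assumptions, not DGL's code:

    #include <cstdint>
    #include <numeric>
    #include <vector>

    int main() {
      std::vector<int32_t> indptr(8);
      int64_t start = 0;  // 64-bit seed: '=' narrows _Ty=int64_t to int32_t inside iota
      std::iota(indptr.begin(), indptr.end(), start);  // warning C4244 on MSVC
      return 0;
    }

Seeding with int32_t{0} instead, or casting the seed, would make the warning disappear without changing the produced row pointers.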
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(151,1): warning C4244: 'initializing': conversion from '_Ty' to '_Ty2', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _Ty=__int64 ] and [ _Ty2=int32_t ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc)
(the instantiation chain, each step reported twice with [ IdType=int32_t ], runs through xmemory(671) 'std::pair<std::pair<IdType,IdType>,IdType>::pair<_Ty,__int64,0>(_Other1 &&,_Other2 &&) noexcept', list(587) 'std::_Default_allocator_traits<_Alloc>::construct', and xhash(599) '_List_node_emplace_op2', and ends at:)
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc(256): message : see reference to function template instantiation for the hash-map emplace being compiled (reported twice) with [ _Kty=std::pair<int32_t,int32_t>, _Hasher=dgl::aten::PairHash, _Keyeq=std::equal_to<std::pair<int32_t,int32_t>>, IdType=int32_t ]
C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc(290): message : see reference to function template instantiation 'std::vector<dgl::runtime::NDArray> dgl::aten::impl::COOGetDataAndIndices(dgl::aten::COOMatrix,dgl::runtime::NDArray,dgl::runtime::NDArray)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_{i8,u8,i32,u32}.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sumpostdiv_{u64,f16,f32}.o
resource_manager.cc
semaphore_wrapper.cc
shared_mem.cc
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/broadcast_sumpostdiv_f64.o Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sum_i8.o Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sum_u8.o Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sum_i32.o Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_i64.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_u64.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sum_u32.o Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sum_i64.o Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sum_u64.o Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_f16.o Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_f32.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_f64.o [ 49%] Linking CXX static library ../../../lib/libgtest_main.a Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sum_f16.o Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sum_f32.o Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sum_f64.o Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_prod_i8.o Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_prod_u8.o Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_prod_i32.o ================================================================================ Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_prod_u32.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
Compiling all_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_reduce_sumpostdiv_bf16.o
[ 49%] Built target gtest_main
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sum_i8.o
system_lib_module.cc
tensordispatch.cc
thread_pool.cc
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sum_u8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sum_i32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_prod_i64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_prod_u64.o
LIBXSMM master-1.16.1-1534 (Linux@681e11859cd1)
--------------------------------------------------------------------------------
GNU Compiler Collection: gcc 7.5.0, and g++ 7.5.0
C / C++ target: -msse4.2
Fortran Compiler is disabled or missing: no Fortran interface is built!
--------------------------------------------------------------------------------
--- LIBXSMM build log
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_prod_f16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_prod_f32.o
threading_backend.cc
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_prod_f64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_min_i8.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_min_u8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sum_u32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sum_i64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sum_u64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sum_f16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_min_i32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_min_u32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_min_i64.o
utils.cc
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_min_u64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_min_f16.o
[ 46%] Linking CXX static library ../../../lib/libgtest.a
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_min_f32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_min_f64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sum_f32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sum_f64.o
[ 50%] Linking CXX static library ../../../lib/libgtest.a
[ 50%] Built target gtest
Scanning dependencies of target gtest_main
Scanning dependencies of target gmock
[ 50%] Building CXX object third_party/googletest/googletest/CMakeFiles/gtest_main.dir/src/gtest_main.cc.o
[ 51%] Building CXX object third_party/googletest/googlemock/CMakeFiles/gmock.dir/src/gmock-all.cc.o
workspace_pool.cc
18>C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(396,16): warning C4477: 'fscanf' : format string '%ld' requires an argument of type 'long *', but variadic argument 1 has type 'int64_t *' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(396,16): message : consider using '%lld' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(396,16): message : consider using '%Id' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(396,16): message : consider using '%I64d' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(396,16): warning C4477: 'fscanf' : format string '%ld' requires an argument of type 'long *', but variadic argument 2 has type 'int64_t *' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(396,16): message : consider using '%lld' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(396,16): message : consider using '%Id' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(396,16): message : consider using '%I64d' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\uvm_array.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\uvm_array.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\uvm_array.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\uvm_array.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
[ 46%] Built target gtest
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_max_i8.o
Scanning dependencies of target gtest_main
Scanning dependencies of target gmock
[ 46%] Building CXX object third_party/googletest/googletest/CMakeFiles/gtest_main.dir/src/gtest_main.cc.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_max_u8.o
[ 48%] Building CXX object third_party/googletest/googlemock/CMakeFiles/gmock.dir/src/gmock-all.cc.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_max_i32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sum_bf16.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_prod_i8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\c_api_common.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\c_api_common.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\c_api_common.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\c_api_common.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
api_container.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\geometry\cpu\geometry_op_impl.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\geometry\cpu\geometry_op_impl.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\geometry\cpu\geometry_op_impl.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\geometry\cpu\geometry_op_impl.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
api_test.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\partition\ndarray_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\partition\ndarray_partition.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\partition\ndarray_partition.cc)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\geometry\geometry.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\geometry\geometry.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\partition\ndarray_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\geometry\geometry.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\geometry\geometry.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_max_u32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_max_i64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_max_u64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_prod_u8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_prod_i32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_prod_u32.o
[ 53%] Linking CXX static library ../../../lib/libgtest_main.a
[ 53%] Built target gtest_main
creators.cc
18>C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): warning C4477: 'fprintf' : format string '%ld' requires an argument of type 'long', but variadic argument 1 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): message : consider using '%lld' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): message : consider using '%Id' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): message : consider using '%I64d' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(317): message : see reference to function template instantiation 'void dgl::aten::LibraVertexCut(int32_t,dgl::runtime::NDArray,dgl::runtime::NDArray,dgl::runtime::NDArray,dgl::runtime::NDArray,dgl::runtime::NDArray,dgl::runtime::NDArray,dgl::runtime::NDArray,int64_t,int64_t,const std::string &)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): warning C4477: 'fprintf' : format string '%ld' requires an argument of type 'long', but variadic argument 2 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): message : consider using '%lld' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): message : consider using '%Id' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): message : consider using '%I64d' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): warning C4477: 'fprintf' : format string '%ld' requires an argument of type 'long', but variadic argument 3 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): message : consider using '%lld' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): message : consider using '%Id' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): message : consider using '%I64d' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(284,10): warning C4477: 'printf' : format string '%ld' requires an argument of type 'long', but variadic argument 1 has type 'unsigned __int64' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(284,10): message : consider using '%zd' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(287,17): warning C4477: 'fprintf' : format string '%ld' requires an argument of type 'long', but variadic argument 1 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(287,17): message : consider using '%lld' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(287,17): message : consider using '%Id' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(287,17): message : consider using '%I64d' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(291,12): warning C4477: 'printf' : format string '%ld' requires an argument of type 'long', but variadic argument 1 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(291,12): message : consider using '%lld' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(291,12): message : consider using '%Id' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(291,12): message : consider using '%I64d' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(296,12): warning C4477: 'printf' : format string '%ld' requires an argument of type 'long', but variadic argument 1 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(296,12): message : consider using '%lld' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(296,12): message : consider using '%Id' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(296,12): message : consider using '%I64d' in the format string [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\random.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\random.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\random.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\random.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
gk_ops.cc
graph.cc
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_max_f16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_max_f32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_max_f64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_premulsum_i8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_prod_i64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_prod_u64.o
/usr/bin/ar: creating lib/libxsmmnoblas.a
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
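Editor's note: the C4477 warnings above all have the same cause: on Windows, `long` is 32-bit, so `%ld` does not match an `int64_t` (or a 64-bit size) argument. A minimal sketch of the mismatch and the portable fix via the `<cinttypes>` macros, under the assumption the code prints/scans plain 64-bit counters as libra_partition.cc appears to:

```cpp
#include <cinttypes>
#include <cstdio>

int main() {
  int64_t num_edges = 123456789012345;  // 64-bit value, as in the log
  size_t buckets = 42;                  // the 'unsigned __int64' case

  // What MSVC flags as C4477: '%ld' expects 'long' (32-bit on Windows).
  // std::printf("%ld %ld\n", num_edges, buckets);

  // Portable fixes: PRId64 expands to the right specifier per platform
  // ("lld" on MSVC), and %zu matches size_t; SCNd64 is the fscanf analog.
  std::printf("%" PRId64 " %zu\n", num_edges, buckets);

  int64_t parsed = 0;
  std::sscanf("7", "%" SCNd64, &parsed);  // avoids the fscanf variant of C4477
  return parsed == 7 ? 0 : 1;
}
```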
graph_apis.cc
graph_op.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(4088,24): warning C4244: '=': conversion from 'const _Ty' to 'float', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _Ty=int ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc)
C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\sample_utils.h(267): message : see reference to function template instantiation 'void std::fill>>,int>(const _FwdIt,const _FwdIt,const int &)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _Ty=ValueType, _FwdIt=std::_Vector_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc)
C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\sample_utils.h(264): message : while compiling class template member function 'void dgl::utils::TreeSampler::ResetState(dgl::FloatArray)' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ IdxType=int64_t, FloatType=float ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc)
C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\sample_utils.h(281): message : see reference to function template instantiation 'void dgl::utils::TreeSampler::ResetState(dgl::FloatArray)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ IdxType=int64_t, FloatType=float ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc)
C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc(42): message : see reference to class template instantiation 'dgl::utils::TreeSampler' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ IdxType=int64_t, FloatType=float ]
C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc(51): message : see reference to function template instantiation 'void dgl::RandomEngine::Choice(int64_t,dgl::FloatArray,int64_t *,bool)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
graph_traversal.cc
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_premulsum_u8.o
[ 48%] Linking CXX static library libtensorpipe.a
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_premulsum_i32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_premulsum_u32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_prod_f16.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
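Editor's note: this C4244 chain says TreeSampler::ResetState calls std::fill over a vector of FloatType=float with an `int` fill value, so MSVC warns about the int-to-float assignment inside the library's fill loop. A minimal sketch of that instantiation and the quiet variant (illustrative, not DGL's actual code):

```cpp
#include <algorithm>
#include <vector>

int main() {
  std::vector<float> weights(8);

  // What the log flags: passing the int literal 0 instantiates
  // std::fill<..., int>, and the assignment of 'const int' to 'float'
  // inside <xutility> triggers warning C4244 on MSVC.
  std::fill(weights.begin(), weights.end(), 0);

  // Quiet variant: pass a value of the element type so no conversion
  // happens inside the algorithm.
  std::fill(weights.begin(), weights.end(), 0.0f);
  return weights[0] == 0.0f ? 0 : 1;
}
```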
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_prod_f32.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
heterograph.cc
heterograph_capi.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\kernel.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\kernel.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\kernel.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\kernel.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_premulsum_i64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_premulsum_u64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_premulsum_f16.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_prod_f64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_prod_bf16.o
immutable_graph.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
metis_partition.cc
network.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\stdint.h(51,1): warning C4005: 'INT32_MIN': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(75): message : see previous definition of 'INT32_MIN' (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\stdint.h(52,1): warning C4005: 'INT64_MIN': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(77): message : see previous definition of 'INT64_MIN' (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\stdint.h(55,1): warning C4005: 'INT32_MAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(76): message : see previous definition of 'INT32_MAX' (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\stdint.h(56,1): warning C4005: 'INT64_MAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(78): message : see previous definition of 'INT64_MAX' (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
nodeflow.cc
pickle.cc
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_premulsum_f32.o
[ 48%] Built target tensorpipe
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_premulsum_f64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_min_i8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_min_u8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_min_i32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sumpostdiv_i8.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sumpostdiv_u8.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sumpostdiv_i32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_min_u32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_min_i64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
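Editor's note: the C4005 warnings arise because third_party/METIS/include/metis.h defines INT32_MIN/INT32_MAX/INT64_MIN/INT64_MAX itself, and MSVC's <stdint.h>, included afterwards, defines them again. A minimal reproduction under that assumption (the pre-include defines stand in for metis.h; the header name in the log is real, the bodies here are illustrative):

```cpp
#include <cstddef>

// Stand-in for what metis.h does: define the limit macros directly
// instead of relying on <stdint.h>.
#define INT32_MIN ((int32_t)0x80000000)
#define INT32_MAX ((int32_t)0x7fffffff)

// Including <stdint.h> (via <cstdint>) after such definitions makes the
// compiler redefine the macros and emit C4005 ('INT32_MIN': macro
// redefinition), pointing back at the earlier definition. The usual fix
// in the third-party header is to guard each macro:
//   #ifndef INT32_MIN
//   #define INT32_MIN ...
//   #endif
#include <cstdint>

int main() { return INT32_MAX > 0 ? 0 : 1; }
```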
sampler.cc
global_uniform.cc
neighbor.cc
get_node_types_cpu.cc
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sumpostdiv_u32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sumpostdiv_i64.o
[ 48%] Linking CXX static library ../../../lib/libgtest_main.a
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sumpostdiv_u64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_min_u64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
================================================================================
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sumpostdiv_f16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sumpostdiv_f32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_sumpostdiv_f64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sum_i8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sum_u8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_min_f16.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_min_f32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_min_f64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_min_bf16.o
LIBXSMM master-1.16.1-1534 (Linux@856b09e8bd12)
--------------------------------------------------------------------------------
GNU Compiler Collection: gcc 7.5.0, and g++ 7.5.0
C / C++ target: -msse4.2
Fortran Compiler is disabled or missing: no Fortran interface is built!
--------------------------------------------------------------------------------
--- LIBXSMM build log
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sum_i32.o
[ 48%] Built target gtest_main
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sum_u32.o
/usr/bin/ar: creating lib/libxsmmnoblas.a
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sum_i64.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_max_i8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_max_u8.o
node2vec.cc
node2vec_cpu.cc
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sum_u64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sum_f16.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_max_i32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_max_u32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sum_f32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sum_f64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_prod_i8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_max_i64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_max_u64.o
randomwalk_cpu.cc
randomwalk_with_restart_cpu.cc
randomwalks.cc
dglgraph_serialize.cc
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_prod_u8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_prod_i32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_prod_u32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_prod_i64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_max_f16.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_max_f32.o
graph_serialize.cc
heterograph_serialize.cc
tensor_serialize.cc
[ 53%] Linking CXX static library ../../../lib/libgmock.a
[ 53%] Built target gmock
/usr/bin/ar: creating lib/libxsmmnoblas.a
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_prod_u64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_prod_f16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_prod_f32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_max_f64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_max_bf16.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
zerocopy_serializer.cc
shared_mem_manager.cc
Scanning dependencies of target gmock_main
[ 53%] Building CXX object third_party/googletest/googlemock/CMakeFiles/gmock_main.dir/src/gmock_main.cc.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_prod_f64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_min_i8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_min_u8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_premulsum_i8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\runtime\utils.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\runtime\utils.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\aten\./array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\runtime\utils.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl/aten/coo.h(235): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\runtime\utils.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_premulsum_u8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
[ 49%] Linking CXX static library ../../../lib/libgmock.a
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
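Editor's note: this C4244 chain recurs for almost every source file in the build, always through dgl::aten::VecToIdArray: a std::vector<int64_t> is copied into a 32-bit id array with std::copy, so the library's copy loop assigns `const __int64` to `int32_t`. A hypothetical reduction of that 32-bit branch and an explicit-narrowing variant that would silence the warning (ToInt32Ids is an illustrative name, not DGL's API):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Mirrors the warning's shape: std::copy from int64_t elements into an
// int32_t buffer makes MSVC emit C4244 inside <xutility>
// ('=': conversion from 'const __int64' to 'int32_t').
std::vector<int32_t> ToInt32Ids(const std::vector<int64_t>& vec) {
  std::vector<int32_t> out(vec.size());
  std::copy(vec.begin(), vec.end(), out.begin());  // C4244 fires here
  return out;
}

// One way to state the narrowing explicitly, so the conversion is
// visible in user code rather than inside the standard library:
std::vector<int32_t> ToInt32IdsExplicit(const std::vector<int64_t>& vec) {
  std::vector<int32_t> out;
  out.reserve(vec.size());
  for (int64_t v : vec) out.push_back(static_cast<int32_t>(v));
  return out;
}

int main() {
  std::vector<int64_t> ids = {1, 2, 3};
  return ToInt32Ids(ids)[0] == ToInt32IdsExplicit(ids)[0] ? 0 : 1;
}
```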
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_premulsum_i32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_premulsum_u32.o
[ 49%] Built target gmock
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_min_i32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_min_u32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_min_i64.o
subgraph.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\creators.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\creators.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\creators.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\creators.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\gk_ops.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\gk_ops.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\gk_ops.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\gk_ops.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
[ 53%] Linking CXX static library libtensorpipe.a
[ 53%] Built target tensorpipe
compact.cc
line_graph.cc
metis_partition_hetero.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_traversal.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_traversal.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_traversal.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_traversal.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
[ 49%] Building CXX object third_party/googletest/googlemock/CMakeFiles/gmock_main.dir/src/gmock_main.cc.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_premulsum_i64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_min_u64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_min_f16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_min_f32.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\get_node_types_cpu.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_premulsum_u64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_premulsum_f16.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_min_f64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_max_i8.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
partition_hetero.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\nodeflow.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_apis.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
remove_edges.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\negative\global_uniform.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
to_bipartite.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\heterograph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_premulsum_f32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_premulsum_f64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_max_u8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_max_i32.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\immutable_graph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const unsigned __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\heterograph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled, with [ _OutIt=int32_t *, _InIt=const unsigned __int64 * ]
    C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy<std::_Vector_const_iterator<std::_Vector_val<std::_Simple_types<uint64_t>>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled, with [ _OutIt=int32_t *, _Ty=uint64_t ]
    C:\Jenkins\workspace\dgl_PR-4648\src\graph\heterograph.cc(511): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray<uint64_t>(const std::vector<uint64_t,std::allocator<uint64_t>> &,uint8_t,DGLContext)' being compiled
    C:\Jenkins\workspace\dgl_PR-4648\src\graph\heterograph.cc(414): message : see reference to function template instantiation 'dgl::FlattenedHeteroGraphPtr dgl::HeteroGraph::FlattenImpl<...>(const std::vector<...> &) const' being compiled
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\node2vec.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\neighbor\neighbor.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
to_simple.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\node2vec_cpu.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_premulsum_bf16.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_max_u32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_max_i64.o
unit_graph.cc
18>C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampler.cc(1526,1): warning C4805: '==': unsafe mix of type 'int64_t' and type 'bool' in operation [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\pickle.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
scheduler.cc
[ 54%] Linking CXX static library ../../../lib/libgmock_main.a
scheduler_apis.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xstddef(240,54): warning C4018: '<': signed/unsigned mismatch (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\node2vec_cpu.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    xstddef(239): message : while compiling class template member function 'bool std::less<dgl::dgl_id_t>::operator ()(_Ty1,_Ty2) noexcept(<expr>) const', with [ _Ty=dgl::dgl_id_t, IdxType=int64_t, _Ty1=const dgl::dgl_id_t &, _Ty2=const int64_t & ]
    algorithm(2951): message : see reference to function template instantiation 'bool std::binary_search<_FwdIt,_Ty,std::less<dgl::dgl_id_t>>(_FwdIt,_FwdIt,const _Ty &,_Pr)' being compiled, with [ _FwdIt=const int64_t *, _Ty=dgl::dgl_id_t, _Pr=std::less<dgl::dgl_id_t> ]
    node2vec_randomwalk.h(45): message : see reference to function template instantiation 'bool std::binary_search(_FwdIt,_FwdIt,const _Ty &)' being compiled, with [ IdxType=int64_t, _FwdIt=const int64_t *, _Ty=dgl::dgl_id_t ]
    node2vec_randomwalk.h(109): message : see reference to function template instantiation 'bool dgl::sampling::impl::`anonymous-namespace'::has_edge_between<IdxType>(const dgl::aten::CSRMatrix &,dgl::dgl_id_t,dgl::dgl_id_t)' being compiled, with [ IdxType=int64_t ]
    node2vec_randomwalk.h(163): message : see reference to function template instantiation 'std::tuple<...> dgl::sampling::impl::`anonymous-namespace'::Node2vecRandomWalkStep<IdxType>(IdxType *,dgl::dgl_id_t,dgl::dgl_id_t,const double,const double,int64_t,const dgl::aten::CSRMatrix &,bool,const dgl::FloatArray &,std::function<...>)' being compiled, with [ IdxType=int64_t ]
    node2vec_cpu.cc(31): message : see reference to function template instantiation 'std::pair<...> dgl::sampling::impl::`anonymous-namespace'::Node2vecRandomWalk<IdxType>(const dgl::HeteroGraphPtr,const dgl::IdArray,const double,const double,const int64_t,const dgl::FloatArray &,std::function<...>)' being compiled, with [ IdxType=int64_t ]
    node2vec_cpu.cc(45): message : see reference to function template instantiation 'std::pair<...> dgl::sampling::impl::Node2vec(const dgl::HeteroGraphPtr,const dgl::IdArray,const double,const double,const int64_t,const dgl::FloatArray &)' being compiled
18>C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampler.cc(1751,1): warning C4805: '==': unsafe mix of type 'int64_t' and type 'bool' in operation [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    sampler.cc(1693): message : while compiling class template member function 'void dgl::WeightedEdgeSamplerObject<...>::Fetch(dgl::runtime::DGLRetValue *)'
    sampler.cc(1925): message : see reference to function template instantiation 'void dgl::WeightedEdgeSamplerObject<...>::Fetch(dgl::runtime::DGLRetValue *)' being compiled
    sampler.cc(1842): message : see reference to class template instantiation 'dgl::WeightedEdgeSamplerObject<...>' being compiled
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xstddef(242,43): warning C4018: '<': signed/unsigned mismatch (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\node2vec_cpu.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    algorithm(2945): message : see reference to function template instantiation 'bool std::less<dgl::dgl_id_t>::operator ()(_Ty1,_Ty2) noexcept(<expr>) const' being compiled, with [ _Ty=dgl::dgl_id_t, IdxType=int64_t, _Ty1=const dgl::dgl_id_t &, _Ty2=const int64_t & ]
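The sampler.cc and node2vec_cpu.cc diagnostics just above are two separate issues. Minimal sketches of both (hypothetical code, not the DGL sources; dgl_id_t is modeled here as an unsigned 64-bit integer, matching the signed/unsigned mix in the with-clauses):

    #include <cstdint>

    int main() {
      int64_t count = 1;
      bool done = true;
      // warning C4805: '==': unsafe mix of type 'int64_t' and type 'bool'
      if (count == done) {}

      uint64_t node_id = 0;  // stand-in for dgl::dgl_id_t
      int64_t idx = -1;
      // warning C4018: '<': signed/unsigned mismatch -- idx is converted
      // to unsigned, so -1 would compare as a huge positive value
      if (node_id < idx) {}
      return 0;
    }

The C4018 case is the one worth watching: std::binary_search over an int64_t array with a dgl_id_t key compares signed against unsigned inside std::less, exactly as the instantiation chain shows.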
common.cc
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sumpostdiv_i8.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sumpostdiv_u8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sumpostdiv_i32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sumpostdiv_u32.o
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sumpostdiv_i64.o
[ 54%] Built target gmock_main
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_op.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const unsigned __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_op.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled, with [ _OutIt=int32_t *, _InIt=const unsigned __int64 * ]
    C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy<std::_Vector_const_iterator<std::_Vector_val<std::_Simple_types<uint64_t>>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled, with [ _OutIt=int32_t *, _Ty=uint64_t ]
    C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_op.cc(400): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray<uint64_t>(const std::vector<uint64_t,std::allocator<uint64_t>> &,uint8_t,DGLContext)' being compiled
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sumpostdiv_u64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_max_u64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_max_f16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_max_f32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_max_f64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_premulsum_i8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_premulsum_u8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_premulsum_i32.o
[ 54%] Linking CXX static library libdmlc.a
msg_queue.cc
socket_communicator.cc
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sumpostdiv_f16.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sumpostdiv_f32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_premulsum_u32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_premulsum_i64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_premulsum_u64.o
[ 54%] Built target dmlc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\randomwalk_with_restart_cpu.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\randomwalks.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
socket_pool.cc
tcp_socket.cc
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\network.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226); the NCCL compile lines below were interleaved with this message in the raw log)
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sumpostdiv_f64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling all_gather.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/all_gather_sumpostdiv_bf16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_premulsum_f16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_premulsum_f32.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\tensor_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\heterograph_capi.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\randomwalk_cpu.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(207,16): warning C4244: 'initializing': conversion from '_Ty' to '_Ty1', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj], with [ _Ty=std::chrono::system_clock::rep ] and [ _Ty1=int32_t ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\randomwalk_cpu.cc)
    xmemory(671): message : see reference to function template instantiation 'std::pair<IdxType,IdxType>::pair<__int64,int,0>(std::pair<__int64,int> &&) noexcept' being compiled, with [ IdxType=int32_t ] (note emitted twice)
    vector(687): message : see reference to function template instantiation 'void std::_Default_allocator_traits<_Alloc>::construct<_Ty,std::pair<__int64,int>>(_Alloc &,_Objty *const ,std::pair<__int64,int> &&)' being compiled, with [ _Alloc=std::allocator<std::pair<IdxType,IdxType>>, _Ty=std::pair<IdxType,IdxType>, _Objty=std::pair<IdxType,IdxType> ] (note emitted twice)
    vector(705): message : see reference to function template instantiation 'void std::vector<std::pair<IdxType,IdxType>,std::allocator<std::pair<IdxType,IdxType>>>::_Emplace_back_with_unused_capacity<_Ty>(_Ty &&)' being compiled, with [ IdxType=int32_t, _Ty=std::pair<__int64,int> ]
    randomwalk_cpu.cc(61): message : see reference to function template instantiation 'void std::vector<std::pair<IdxType,IdxType>,std::allocator<std::pair<IdxType,IdxType>>>::emplace_back<std::pair<__int64,int>>(std::pair<__int64,int> &&)' being compiled, with [ IdxType=int32_t ]
    randomwalk_cpu.cc(127): message : see reference to function template instantiation 'std::tuple<...> dgl::sampling::impl::SelectPinSageNeighbors(const dgl::IdArray,const dgl::IdArray,const int64_t,const int64_t)' being compiled
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sum_i8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_premulsum_f64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_i8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sum_u8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sum_i32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_u8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_i32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sum_u32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sum_i64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_u32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_i64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
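The 'initializing' flavor of C4244 above comes from a different spot than the std::copy ones: per the with-clauses, randomwalk_cpu.cc pushes a std::pair whose first member is a 64-bit std::chrono tick count into a vector of pair<int32_t,int32_t>, and the narrowing happens inside std::pair's converting constructor in <utility>. A sketch under those assumptions (hypothetical, not the actual DGL code):

    #include <chrono>
    #include <cstdint>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<std::pair<int32_t, int32_t>> picks;
      int64_t ticks =
          std::chrono::system_clock::now().time_since_epoch().count();
      // warning C4244: 'initializing': conversion from '_Ty' to '_Ty1'
      // (pair<int32_t,int32_t> built from a pair<int64_t,int> rvalue)
      picks.emplace_back(std::pair<int64_t, int>(ticks, 0));
      return 0;
    }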
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sum_u64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_u64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_f16.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\dglgraph_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled, with [ _OutIt=int32_t *, _InIt=const __int64 * ]
    C:\Jenkins\workspace\dgl_PR-4648\include\dgl\aten\./array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy<std::_Vector_const_iterator<std::_Vector_val<std::_Simple_types<int64_t>>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled, with [ _OutIt=int32_t *, _Ty=int64_t ]
    C:\Jenkins\workspace\dgl_PR-4648\include\dgl/aten/coo.h(235): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray<int64_t>(const std::vector<int64_t,std::allocator<int64_t>> &,uint8_t,DGLContext)' being compiled
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampler.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,1): warning C4267: '=': conversion from 'size_t' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampler.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled, with [ _OutIt=int32_t *, _InIt=const unsigned __int64 * ]
    C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy<std::_Vector_const_iterator<std::_Vector_val<std::_Simple_types<unsigned __int64>>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled, with [ _OutIt=int32_t *, _Ty=unsigned __int64 ]
    C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampler.cc(803): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray<unsigned __int64>(const std::vector<unsigned __int64,std::allocator<unsigned __int64>> &,uint8_t,DGLContext)' being compiled
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\graph_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    (same VecToIdArray<int64_t> instantiation chain: xutility(3643) -> array_ops.h(349) -> csr.h(226))
/usr/bin/ar: creating lib/libxsmmgen.a
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
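C4267 is the same narrowing story with a size_t source instead of __int64: on 64-bit Windows size_t is unsigned __int64, so copying container sizes or offsets into an int32_t id array trips the identical xutility copy loop. A minimal sketch (hypothetical code):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<size_t> offsets = {10, 20};  // size_t is 64-bit on x64
      int32_t out[2];
      // warning C4267: '=': conversion from 'size_t' to 'int32_t'
      std::copy(offsets.begin(), offsets.end(), out);
      return 0;
    }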
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sum_f16.o
[ 49%] Linking CXX static library ../../../lib/libgmock_main.a
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sum_f32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_f32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_f64.o
18>C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(75,1): warning C4005: 'INT32_MIN': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\metis_partition_hetero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: previous definition at MSVC include\stdint.h(51)
18>C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(76,1): warning C4005: 'INT32_MAX': macro redefinition (same source file); previous definition at stdint.h(55)
18>C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(77,1): warning C4005: 'INT64_MIN': macro redefinition (same source file); previous definition at stdint.h(52)
18>C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(78,1): warning C4005: 'INT64_MAX': macro redefinition (same source file); previous definition at stdint.h(56)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\heterograph_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\shared_mem_manager.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
[ 49%] Built target gmock_main
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sum_f64.o
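The C4005 block above is a plain macro-redefinition clash: third_party\METIS\include\metis.h re-#defines the INT32_MIN/INT32_MAX/INT64_MIN/INT64_MAX limits that MSVC's <stdint.h> has already defined, with a different token sequence. A minimal sketch of the same clash (the replacement definitions here are illustrative, not METIS's exact ones):

```cpp
// Sketch of the C4005 pattern: a header re-#defines macros that <stdint.h>
// already provides, using a token sequence that differs from the original.
#include <cstdint>  // already defines INT32_MIN/INT32_MAX (and 64-bit limits)

// Simplified stand-in for what a third-party header like metis.h does;
// because the replacement text differs from <stdint.h>'s, MSVC reports
// C4005 'macro redefinition' (guarding with #ifndef would avoid it).
#define INT32_MIN (-2147483647 - 1)  // C4005: macro redefinition
#define INT32_MAX (2147483647)       // C4005: macro redefinition

int main() { return INT32_MAX > 0 ? 0 : 1; }
```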
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sum_bf16.o
Compiling functions.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/functions.o
Compiling onerank_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/onerank_reduce.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\subgraph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_prod_i8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sum_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sum_u8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_prod_u8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_prod_i32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sum_i32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sum_u32.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\metis_partition_hetero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\line_graph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_prod_u32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_prod_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sum_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sum_u64.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\remove_edges.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\tuple(632,57): warning C4244: '=': conversion from '_Ty' to 'IdType', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _Ty=dgl::dgl_id_t ] and [ IdType=int32_t ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\compact.cc)
    note: via 'std::tuple<IdType &,IdType &>::operator =(std::pair<dgl::dgl_id_t,dgl::dgl_id_t> &&) noexcept' at src\graph\transform\compact.cc(63), instantiated from 'std::pair<std::vector<...>,std::vector<...>> dgl::transform::`anonymous-namespace'::CompactGraphsCPU(const std::vector<...> &,const std::vector<...> &)' at compact.cc(146)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\tuple(633,58): warning C4244: '=': conversion from '_Ty' to 'IdType', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _Ty=dgl::dgl_id_t ] and [ IdType=int32_t ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\compact.cc)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(151,1): warning C4244: 'initializing': conversion from '_Ty' to '_Ty2', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _Ty=dgl::dgl_id_t ] and [ _Ty2=int32_t ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\compact.cc)
    note: via 'std::pair<const K,V>::pair(_Other1,_Other2 &&) noexcept' (src\graph\transform\..\..\array\cpu\array_utils.h(57), with [ K=int32_t, V=int32_t, IdType=int32_t, _Other1=const int32_t &, _Other2=size_t ]), while compiling 'void dgl::aten::IdHashMap<int32_t>::Update(dgl::IdArray)' (array_utils.h(50)); member function instantiated at compact.cc(80), class 'dgl::aten::IdHashMap<int32_t>' instantiated at compact.cc(74)
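The tuple(632)/tuple(633) warnings in compact.cc come from assigning a pair of 64-bit dgl::dgl_id_t values through std::tie into 32-bit IdType locals; std::tuple::operator= performs the two narrowing member assignments. A hedged sketch of that pattern, assuming dgl_id_t is a 64-bit unsigned integer and using a made-up FindEdge stand-in (not a DGL API):

```cpp
// Hedged sketch of the compact.cc pattern behind the tuple(632)/tuple(633)
// warnings: unpacking a 64-bit id pair through std::tie into 32-bit locals.
#include <cstdint>
#include <tuple>
#include <utility>

using dgl_id_t = uint64_t;  // assumption: DGL's 64-bit graph id type
using IdType = int32_t;     // the 32-bit instantiation the log shows

std::pair<dgl_id_t, dgl_id_t> FindEdge() { return {3, 7}; }  // hypothetical stand-in

int main() {
  IdType src, dst;
  // tuple<IdType&, IdType&>::operator=(pair<dgl_id_t, dgl_id_t>&&) assigns
  // each 64-bit member into an int32_t -> C4244, possible loss of data.
  std::tie(src, dst) = FindEdge();
  return (src + dst == 10) ? 0 : 1;
}
```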
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_prod_u64.o
[ 49%] Linking CXX static library libtensorpipe.a
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_prod_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sum_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sum_f32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sum_f64.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\compact.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\to_simple.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\partition_hetero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const unsigned __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\partition_hetero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same chain with [ _InIt=const unsigned __int64 *, _Ty=uint64_t ], instantiated from 'VecToIdArray<uint64_t>(const std::vector<uint64_t> &,uint8_t,DGLContext)' at src\graph\transform\partition_hetero.cc(106)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\scheduler\scheduler_apis.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\unit_graph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_prod_f32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_prod_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_prod_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_prod_u8.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(151,1): warning C4267: 'initializing': conversion from 'size_t' to '_Ty2', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _Ty2=int32_t ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\to_bipartite.cc)
    note: via 'std::pair<const K,V>::pair(_Other1,_Other2 &&) noexcept' (src\graph\transform\..\..\array\cpu\array_utils.h(57), with [ K=int32_t, V=int32_t, IdType=int32_t, _Other1=const int32_t &, _Other2=size_t ]), while compiling 'void dgl::aten::IdHashMap<int32_t>::Update(dgl::IdArray)' (array_utils.h(50)); instantiated at to_bipartite.cc(80), in turn from 'std::tuple<...> dgl::transform::`anonymous-namespace'::ToBlockCPU(dgl::HeteroGraphPtr,const std::vector<...> &,bool,std::vector<...> *const )' at to_bipartite.cc(150)
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\to_bipartite.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
[ 49%] Built target tensorpipe
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_prod_bf16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_prod_i32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_prod_u32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_prod_i64.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\rpc\network\socket_communicator.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_min_i8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_min_u8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_prod_u64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_prod_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_prod_f32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_min_i32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_min_u32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_prod_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_min_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_min_u8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_min_i64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_min_u64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_min_i32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_min_u32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_min_i64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_min_f16.o
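The utility(151) C4267 in to_bipartite.cc matches the classic id-remapping idiom behind IdHashMap: when a new id is inserted, the map's current size() (a size_t) becomes its remapped 32-bit id, and the pair constructor narrows it. A sketch under that assumption (names are illustrative, not DGL's exact members):

```cpp
// Hedged sketch of the IdHashMap idiom behind the utility(151) C4267 above:
// the map's current size() (a size_t) is used as the next remapped 32-bit id.
#include <cstdint>
#include <unordered_map>

int main() {
  using IdType = int32_t;
  std::unordered_map<IdType, IdType> oldv2newv;  // old id -> compacted id
  const IdType ids[] = {42, 7, 42};
  for (IdType id : ids) {
    // std::pair<const IdType, IdType>'s converting constructor narrows
    // size_t -> int32_t when initializing the mapped value -> C4267.
    oldv2newv.emplace(id, oldv2newv.size());
  }
  return static_cast<int>(oldv2newv.size());  // 2 distinct ids
}
```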
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_min_u64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_min_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_min_f32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_min_f32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_min_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_min_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_max_i8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_min_bf16.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_max_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_max_u8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_max_i32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_max_u8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_max_i32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_max_u32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_max_i64.o
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dlpack\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\phmap" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\xbyak" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include" /I"C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\nanoflann\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\libxsmm\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /I"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\include" /nologo /W1 /WX- /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D WIN32_LEAN_AND_MEAN /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D __USE_XOPEN2K8 /D DMLC_CORE_USE_CMAKE /D DMLC_USE_CXX11=1 /D "CMAKE_INTDIR=\"Release\"" /D dgl_EXPORTS /D _WINDLL /D _MBCS /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /std:c++14 /Fo"dgl.dir\Release\/src/array/cpu/traversal.cc.obj" /Fd"dgl.dir\Release\vc142.pdb" /Gd /TP /errorReport:queue "C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\traversal.cc"
traversal.cc
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_max_u32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_max_u64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_max_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_max_f32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_max_i64.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_max_u64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_max_f64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_premulsum_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_premulsum_u8.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_max_f16.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_max_f32.o
18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
    note: same std::copy / 'VecToIdArray<int64_t>' chain (xutility(3643), array_ops.h(349)), instantiated at include\dgl\aten\csr.h(226)
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_premulsum_i32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_premulsum_u32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_max_f64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_max_bf16.o
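None of these C4244/C4267 diagnostics are build-breaking; they flag narrowing that is safe only while every id fits in 32 bits. One conventional way to make that contract explicit and silence the warning (a sketch of a common remedy, not necessarily the change this PR makes) is an explicit checked cast:

```cpp
// A sketch of one conventional remedy (an assumption about intent, not
// necessarily what this PR does): make the narrowing explicit, optionally
// asserting that every id actually fits in 32 bits, so C4244 goes away.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <vector>

int main() {
  std::vector<int64_t> ids = {1, 2, 3};
  std::vector<int32_t> out(ids.size());
  std::transform(ids.begin(), ids.end(), out.begin(), [](int64_t v) {
    assert(v <= std::numeric_limits<int32_t>::max());  // contract: ids fit in 32 bits
    return static_cast<int32_t>(v);  // explicit cast: no implicit-narrowing warning
  });
  return static_cast<int>(out.back());  // 3
}
```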
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_premulsum_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_premulsum_u64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_premulsum_i8.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_premulsum_u8.o
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dlpack\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\phmap" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\xbyak" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include" /I"C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\nanoflann\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\libxsmm\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /I"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\include" /nologo /W1 /WX- /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D WIN32_LEAN_AND_MEAN /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D __USE_XOPEN2K8 /D DMLC_CORE_USE_CMAKE /D DMLC_USE_CXX11=1 /D "CMAKE_INTDIR=\"Release\"" /D dgl_EXPORTS /D _WINDLL /D _MBCS /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /std:c++14 /Fo"dgl.dir\Release\/src/array/union_partition.cc.obj" /Fd"dgl.dir\Release\vc142.pdb" /Gd /TP /errorReport:queue "C:\Jenkins\workspace\dgl_PR-4648\src\array\union_partition.cc"
union_partition.cc
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_premulsum_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_premulsum_f32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_premulsum_f64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_premulsum_i32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_premulsum_u32.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_i8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_u8.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_i32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_premulsum_i64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_premulsum_u64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_u32.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_i64.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_u64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_premulsum_f16.o
Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_f16.o Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_f32.o 18>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _InIt=const __int64 * ] C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator>> ] C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray(const std::vector> &,uint8_t,DGLContext)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_premulsum_f32.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_premulsum_f64.o Compiling sendrecv.cu > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/sendrecv_sumpostdiv_f64.o C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dlpack\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\phmap" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\xbyak" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include" /I"C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\nanoflann\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\libxsmm\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /I"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\include" /nologo /W1 /WX- /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D WIN32_LEAN_AND_MEAN /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D __USE_XOPEN2K8 /D DMLC_CORE_USE_CMAKE /D DMLC_USE_CXX11=1 /D "CMAKE_INTDIR=\"Release\"" /D dgl_EXPORTS /D _WINDLL /D _MBCS /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /std:c++14 /Fo"dgl.dir\Release\/src/graph/transform/cpu/knn.cc.obj" /Fd"dgl.dir\Release\vc142.pdb" /Gd /TP /errorReport:queue 
"C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\cpu\knn.cc" knn.cc nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_premulsum_bf16.o Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sumpostdiv_i8.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sumpostdiv_u8.o /usr/bin/ar: creating lib/libxsmmgen.a nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). /usr/bin/ar: creating lib/libxsmm.a /usr/bin/ar: creating lib/libxsmmext.a ================================================================================ LIBXSMM master-1.16.1-1534 (Linux@856b09e8bd12) -------------------------------------------------------------------------------- GNU Compiler Collection: gcc 7.5.0, and g++ 7.5.0 C / C++ target: -msse4.2 Fortran Compiler is disabled or missing: no Fortran interface is built! -------------------------------------------------------------------------------- BLAS dependency and fallback is removed! 
--------------------------------------------------------------------------------
[ 54%] Built target libxsmm
Scanning dependencies of target dgl
[ 54%] Building CXX object CMakeFiles/dgl.dir/src/array/array.cc.o
[ 54%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_remove.cc.o
[ 54%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_scatter.cc.o
[ 54%] Building CXX object CMakeFiles/dgl.dir/src/array/array_arith.cc.o
[ 54%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_cumsum.cc.o
[ 54%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_remove.cc.o
[ 54%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_op_impl.cc.o
[ 54%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_pack.cc.o
[ 54%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_sort.cc.o
[ 54%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_sort.cc.o
[ 56%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_sum.cc.o
[ 56%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_mm.cc.o
[ 56%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_union.cc.o
[ 58%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_index_select.cc.o
[ 58%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_get_data.cc.o
[ 59%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_repeat.cc.o
[ 60%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/gather_mm.cc.o
[ 60%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/rowwise_topk.cc.o
[ 60%] Building CXX object CMakeFiles/dgl.dir/src/geometry/cpu/geometry_op_impl.cc.o
[ 60%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/spmat_op_impl_coo.cc.o
[ 60%] Building CXX object CMakeFiles/dgl.dir/src/random/cpu/choice.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/runtime/dso_module.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/sddmm.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/spmat_op_impl_csr.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/negative_sampling.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/rowwise_sampling.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/segment_reduce.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/uvm_array.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/c_api_common.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/traversal.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/geometry/geometry.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/union_partition.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/runtime/module_util.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_sort.cc.o
[ 64%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/spmm.cc.o
[ 64%] Building CXX object CMakeFiles/dgl.dir/src/runtime/c_object_api.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/runtime/c_runtime_api.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/random/random.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/runtime/config.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/array/libra_partition.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/array/filter.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/array/kernel.cc.o
[ 67%] Building CXX object CMakeFiles/dgl.dir/src/runtime/registry.cc.o
[ 67%] Building CXX object CMakeFiles/dgl.dir/src/bcast.cc.o
[ 67%] Building CXX object CMakeFiles/dgl.dir/src/runtime/semaphore_wrapper.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/runtime/ndarray.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/runtime/shared_mem.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/runtime/object.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/runtime/cpu_device_api.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/runtime/file_util.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/runtime/dlpack_convert.cc.o
[ 70%] Building CXX object CMakeFiles/dgl.dir/src/partition/ndarray_partition.cc.o
[ 70%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_nonzero.cc.o
[ 70%] Building CXX object CMakeFiles/dgl.dir/src/runtime/system_lib_module.cc.o
[ 70%] Building CXX object CMakeFiles/dgl.dir/src/runtime/threading_backend.cc.o
[ 70%] Building CXX object CMakeFiles/dgl.dir/src/runtime/thread_pool.cc.o
[ 70%] Building CXX object CMakeFiles/dgl.dir/src/api/api_container.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/runtime/module.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/runtime/workspace_pool.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/api/api_test.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_coalesce.cc.o
[ 72%] Building CXX object CMakeFiles/dgl.dir/src/runtime/tensordispatch.cc.o
[ 74%] Building CXX object CMakeFiles/dgl.dir/src/runtime/resource_manager.cc.o
[ 75%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_linegraph.cc.o
[ 75%] Building CXX object CMakeFiles/dgl.dir/src/runtime/utils.cc.o
[ 76%] Building CXX object CMakeFiles/dgl.dir/src/graph/pickle.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/graph/metis_partition.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/graph/immutable_graph.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_to_simple.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/disjoint_union.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/graph/network.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/graph/heterograph.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph_apis.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph_op.cc.o
[ 79%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/heterograph_serialize.cc.o
[ 80%] Building CXX object CMakeFiles/dgl.dir/src/graph/creators.cc.o
[ 80%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph_traversal.cc.o
[ 80%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/negative/global_uniform.cc.o
[ 80%] Building CXX object CMakeFiles/dgl.dir/src/graph/nodeflow.cc.o
[ 81%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/get_node_types_cpu.cc.o
[ 81%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampler.cc.o
[ 81%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/node2vec.cc.o
[ 81%] Building CXX object CMakeFiles/dgl.dir/src/graph/heterograph_capi.cc.o
[ 82%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/randomwalk_with_restart_cpu.cc.o
[ 82%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/randomwalks.cc.o
[ 82%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/randomwalk_cpu.cc.o
[ 82%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/node2vec_cpu.cc.o
[ 82%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/neighbor/neighbor.cc.o
[ 82%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/dglgraph_serialize.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/gk_ops.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/zerocopy_serializer.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/tensor_serialize.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/compact.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/graph_serialize.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/subgraph.cc.o
[ 85%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/knn.cc.o
[ 86%] Building CXX object CMakeFiles/dgl.dir/src/graph/shared_mem_manager.cc.o
[ 86%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/cpu/knn.cc.o
[ 86%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/metis_partition_hetero.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/remove_edges.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/partition_hetero.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/tcp_socket.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/union_partition.cc.o
[ 88%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/common.cc.o
[ 88%] Building CXX object CMakeFiles/dgl.dir/src/scheduler/scheduler_apis.cc.o
[ 88%] Building CXX object CMakeFiles/dgl.dir/src/scheduler/scheduler.cc.o
[ 88%] Building CXX object CMakeFiles/dgl.dir/src/graph/unit_graph.cc.o
[ 88%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/to_bipartite.cc.o
[ 90%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/socket_pool.cc.o
[ 90%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/msg_queue.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/graph/traversal.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/socket_communicator.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/line_graph.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/rpc/rpc.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/to_simple.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/rpc/tensorpipe/tp_communicator.cc.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sumpostdiv_i32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sumpostdiv_u32.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sumpostdiv_i64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sumpostdiv_u64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sumpostdiv_f16.o
/usr/bin/ar: creating lib/libxsmmgen.a
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sumpostdiv_f32.o
[ 48%] Linking CXX static library ../../../lib/libgmock.a
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sumpostdiv_f64.o
Compiling broadcast.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/broadcast_sumpostdiv_bf16.o
[ 48%] Built target gmock
Scanning dependencies of target gmock_main
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sum_i8.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sum_u8.o
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dlpack\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\phmap" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\xbyak" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include" /I"C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\nanoflann\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\libxsmm\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /I"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\include" /nologo /W1 /WX- /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D WIN32_LEAN_AND_MEAN /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D __USE_XOPEN2K8 /D DMLC_CORE_USE_CMAKE /D DMLC_USE_CXX11=1 /D "CMAKE_INTDIR=\"Release\"" /D dgl_EXPORTS /D _WINDLL /D _MBCS /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /std:c++14 /Fo"dgl.dir\Release\/src/graph/transform/knn.cc.obj" /Fd"dgl.dir\Release\vc142.pdb" /Gd /TP /errorReport:queue "C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\knn.cc"
knn.cc
[ 48%] Building CXX object third_party/googletest/googlemock/CMakeFiles/gmock_main.dir/src/gmock_main.cc.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sum_i32.o
[ 49%] Linking CXX static library libdmlc.a
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sum_u32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sum_i64.o
[ 48%] Linking CXX static library libdmlc.a
[ 49%] Built target dmlc
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sum_u64.o
[ 48%] Built target dmlc
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sum_f16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sum_f32.o
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dlpack\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\phmap" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\xbyak" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include" /I"C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\nanoflann\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\libxsmm\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /I"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\include" /nologo /W1 /WX- /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D WIN32_LEAN_AND_MEAN /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D __USE_XOPEN2K8 /D DMLC_CORE_USE_CMAKE /D DMLC_USE_CXX11=1 /D "CMAKE_INTDIR=\"Release\"" /D dgl_EXPORTS /D _WINDLL /D _MBCS /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /std:c++14 /Fo"dgl.dir\Release\/src/graph/transform/union_partition.cc.obj" /Fd"dgl.dir\Release\vc142.pdb" /Gd /TP /errorReport:queue "C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\union_partition.cc"
union_partition.cc
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sum_f64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sum_bf16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_prod_i8.o
/root/jenkins/workspace/dgl_PR-4648/src/partition/ndarray_partition.cc: In member function 'virtual int64_t dgl::partition::RangePartition::PartSize(int) const':
/root/jenkins/workspace/dgl_PR-4648/src/partition/ndarray_partition.cc:202:3: warning: control reaches end of non-void function [-Wreturn-type]
 }
 ^
In file included from /root/jenkins/workspace/dgl_PR-4648/src/geometry/../c_api_common.h:10:0,
                 from /root/jenkins/workspace/dgl_PR-4648/src/geometry/geometry.cc:9:
/root/jenkins/workspace/dgl_PR-4648/include/dgl/runtime/packed_func.h:502:21: warning: inline function 'TObjectRef dgl::runtime::DGLArgValue::AsObjectRef() const [with TObjectRef = dgl::HeteroGraphRef]' used but never defined
 inline TObjectRef AsObjectRef() const;
                   ^~~~~~~~~~~
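The packed_func.h diagnostic above fires when a translation unit uses a function that was declared inline but whose definition that unit never sees. A self-contained hypothetical sketch of the pattern and its usual fix (invented names, not DGL's DGLArgValue):

    // An inline member declared in a header...
    struct ArgValue {
      template <typename TObjectRef>
      inline TObjectRef AsObjectRef() const;  // declaration only
    };

    // ...must have its definition visible in every translation unit that
    // instantiates it, e.g. lower in the same header; otherwise gcc warns
    // "inline function ... used but never defined" and the call can fail
    // to resolve at link time.
    template <typename TObjectRef>
    inline TObjectRef ArgValue::AsObjectRef() const {
      return TObjectRef{};
    }

    int main() { return ArgValue{}.AsObjectRef<int>(); }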
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_prod_u8.o
In file included from /root/jenkins/workspace/dgl_PR-4648/third_party/dmlc-core/include/dmlc/logging.h:132:0,
                 from /root/jenkins/workspace/dgl_PR-4648/include/dgl/./runtime/object.h:9,
                 from /root/jenkins/workspace/dgl_PR-4648/include/dgl/graph_interface.h:15,
                 from /root/jenkins/workspace/dgl_PR-4648/include/dgl/sampler.h:13,
                 from /root/jenkins/workspace/dgl_PR-4648/src/graph/sampler.cc:6:
/root/jenkins/workspace/dgl_PR-4648/src/graph/sampler.cc: In member function 'dgl::NegSubgraph dgl::{anonymous}::EdgeSamplerObject::genNegEdgeSubgraph(const dgl::Subgraph&, const string&, int64_t, bool, bool)':
/root/jenkins/workspace/dgl_PR-4648/src/graph/sampler.cc:1189:48: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   assert(prev_neg_offset + neg_sample_size == neg_vids.size());
          ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~
/root/jenkins/workspace/dgl_PR-4648/src/graph/sampler.cc:1193:48: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   assert(prev_neg_offset + neg_sample_size == neg_vids.size());
          ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_prod_i32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_prod_u32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_prod_i64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_prod_u64.o
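The -Wsign-compare warnings above compare a signed sum against the unsigned result of .size(). A minimal sketch of the pattern, with a cast that keeps both operands unsigned (hypothetical values, not the sampler's actual state):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<int64_t> neg_vids(8);
      int64_t prev_neg_offset = 4, neg_sample_size = 4;

      // Signed vs. unsigned: size() returns size_t, so gcc's -Wsign-compare
      // fires on a direct comparison like
      //   assert(prev_neg_offset + neg_sample_size == neg_vids.size());

      // Casting the signed side (known non-negative here) silences it:
      assert(static_cast<size_t>(prev_neg_offset + neg_sample_size) ==
             neg_vids.size());
      return 0;
    }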
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dlpack\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\phmap" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\xbyak" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include" /I"C:\Jenkins\workspace\dgl_PR-4648\tensoradapter\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\nanoflann\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\libxsmm\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /I"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\include" /nologo /W1 /WX- /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D WIN32_LEAN_AND_MEAN /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D __USE_XOPEN2K8 /D DMLC_CORE_USE_CMAKE /D DMLC_USE_CXX11=1 /D "CMAKE_INTDIR=\"Release\"" /D dgl_EXPORTS /D _WINDLL /D _MBCS /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /std:c++14 /Fo"dgl.dir\Release\/src/graph/traversal.cc.obj" /Fd"dgl.dir\Release\vc142.pdb" /Gd /TP /errorReport:queue "C:\Jenkins\workspace\dgl_PR-4648\src\graph\traversal.cc"
traversal.cc
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_prod_f16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_prod_f32.o
/root/jenkins/workspace/dgl_PR-4648/src/array/libra_partition.cc: In function 'dgl::runtime::List dgl::aten::Libra2dglBuildDict(dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, int32_t, int32_t, int64_t, const string&)':
/root/jenkins/workspace/dgl_PR-4648/src/array/libra_partition.cc:396:11: warning: ignoring return value of 'int fscanf(FILE*, const char*, ...)', declared with attribute warn_unused_result [-Wunused-result]
   fscanf(fp, "%ld,%ld,%f\n", &u, &v, &w);  // reading an edge - the src and dst global node IDs
   ~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_prod_f64.o
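glibc marks fscanf with warn_unused_result, so discarding its return value draws -Wunused-result. Checking that the expected number of fields was converted both silences the warning and catches malformed lines. A hypothetical sketch of the flagged pattern (invented file name, not libra_partition.cc's actual loop):

    #include <cstdio>

    int main() {
      FILE* fp = std::fopen("edges.csv", "r");
      if (!fp) return 1;
      long u, v;  // %ld expects long*
      float w;
      // fscanf returns the number of fields successfully converted;
      // testing it against 3 consumes the result and stops on bad input.
      while (std::fscanf(fp, "%ld,%ld,%f\n", &u, &v, &w) == 3) {
        // process edge (u, v, w) ...
      }
      std::fclose(fp);
      return 0;
    }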
/root/jenkins/workspace/dgl_PR-4648/src/array/libra_partition.cc: In function 'int32_t dgl::aten::Ver2partition(IdType, int64_t*, int32_t) [with IdType = long int]':
/root/jenkins/workspace/dgl_PR-4648/src/array/libra_partition.cc:43:1: warning: control reaches end of non-void function [-Wreturn-type]
 }
 ^
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_prod_bf16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_min_i8.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_min_u8.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_min_i32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_min_u32.o
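This -Wreturn-type warning, like the earlier one for RangePartition::PartSize, means gcc found a control path that falls off the end of a non-void function, typically because every return sits inside a loop or switch. A hypothetical sketch of the shape, not DGL's actual Ver2partition body:

    #include <cstdint>

    int32_t Ver2Partition(int64_t id, const int64_t* ranges, int32_t nparts) {
      for (int32_t p = 0; p < nparts; ++p) {
        if (id >= ranges[p] && id < ranges[p + 1]) return p;
      }
      // Without this explicit fallback return, gcc warns
      // "control reaches end of non-void function".
      return -1;
    }

    int main() {
      const int64_t ranges[] = {0, 10, 20};
      return Ver2Partition(5, ranges, 2);  // id 5 lies in partition 0
    }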
18>PreLinkEvent:
  Auto build dll exports
  setlocal
  cd C:\Jenkins\workspace\dgl_PR-4648\build
  if %errorlevel% neq 0 goto :cmEnd
  C:
  if %errorlevel% neq 0 goto :cmEnd
  "C:\Program Files\CMake\bin\cmake.exe" -E __create_def C:/Jenkins/workspace/dgl_PR-4648/build/dgl.dir/Release/exports.def C:/Jenkins/workspace/dgl_PR-4648/build/dgl.dir/Release//objects.txt
  if %errorlevel% neq 0 goto :cmEnd
  :cmEnd
  endlocal & call :cmErrorLevel %errorlevel% & goto :cmDone
  :cmErrorLevel
  exit /b %1
  :cmDone
  if %errorlevel% neq 0 goto :VCEnd
  :VCEnd
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_min_i64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_min_u64.o
Link:
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\link.exe /ERRORREPORT:QUEUE /OUT:"C:\Jenkins\workspace\dgl_PR-4648\build\Release\dgl.dll" /INCREMENTAL:NO /NOLOGO "third_party\dmlc-core\Release\dmlc.lib" kernel32.lib user32.lib gdi32.lib winspool.lib shell32.lib ole32.lib oleaut32.lib uuid.lib comdlg32.lib advapi32.lib /DEF:"C:/Jenkins/workspace/dgl_PR-4648/build/dgl.dir/Release/exports.def" /MANIFEST /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /manifest:embed /PDB:"C:/Jenkins/workspace/dgl_PR-4648/build/Release/dgl.pdb" /SUBSYSTEM:CONSOLE /TLBID:1 /DYNAMICBASE /NXCOMPAT /IMPLIB:"C:/Jenkins/workspace/dgl_PR-4648/build/Release/dgl.lib" /MACHINE:X64 /machine:x64 /DLL dgl.dir\Release\array.obj dgl.dir\Release\array_arith.obj dgl.dir\Release\array_cumsum.obj dgl.dir\Release\array_index_select.obj dgl.dir\Release\array_nonzero.obj dgl.dir\Release\array_op_impl.obj dgl.dir\Release\array_pack.obj dgl.dir\Release\array_repeat.obj dgl.dir\Release\array_scatter.obj dgl.dir\Release\array_sort.obj dgl.dir\Release\coo_coalesce.obj dgl.dir\Release\coo_linegraph.obj dgl.dir\Release\coo_remove.obj dgl.dir\Release\coo_sort.obj dgl.dir\Release\csr_get_data.obj dgl.dir\Release\csr_mm.obj dgl.dir\Release\csr_remove.obj dgl.dir\Release\csr_sort.obj dgl.dir\Release\csr_sum.obj dgl.dir\Release\csr_to_simple.obj dgl.dir\Release\csr_union.obj dgl.dir\Release\disjoint_union.obj dgl.dir\Release\gather_mm.obj dgl.dir\Release\negative_sampling.obj dgl.dir\Release\rowwise_sampling.obj dgl.dir\Release\rowwise_topk.obj dgl.dir\Release\sddmm.obj dgl.dir\Release\segment_reduce.obj dgl.dir\Release\spmat_op_impl_coo.obj dgl.dir\Release\spmat_op_impl_csr.obj dgl.dir\Release\spmm.obj dgl.dir\Release\filter.obj dgl.dir\Release\kernel.obj dgl.dir\Release\libra_partition.obj dgl.dir\Release\uvm_array.obj dgl.dir\Release\bcast.obj dgl.dir\Release\c_api_common.obj dgl.dir\Release\geometry_op_impl.obj dgl.dir\Release\geometry.obj dgl.dir\Release\ndarray_partition.obj dgl.dir\Release\choice.obj dgl.dir\Release\random.obj dgl.dir\Release\c_object_api.obj dgl.dir\Release\c_runtime_api.obj dgl.dir\Release\config.obj dgl.dir\Release\cpu_device_api.obj dgl.dir\Release\dlpack_convert.obj dgl.dir\Release\dso_module.obj dgl.dir\Release\file_util.obj dgl.dir\Release\module.obj dgl.dir\Release\module_util.obj dgl.dir\Release\ndarray.obj dgl.dir\Release\object.obj dgl.dir\Release\registry.obj dgl.dir\Release\resource_manager.obj dgl.dir\Release\semaphore_wrapper.obj dgl.dir\Release\shared_mem.obj dgl.dir\Release\system_lib_module.obj dgl.dir\Release\tensordispatch.obj dgl.dir\Release\thread_pool.obj dgl.dir\Release\threading_backend.obj dgl.dir\Release\utils.obj dgl.dir\Release\workspace_pool.obj dgl.dir\Release\api_container.obj dgl.dir\Release\api_test.obj dgl.dir\Release\creators.obj dgl.dir\Release\gk_ops.obj dgl.dir\Release\graph.obj dgl.dir\Release\graph_apis.obj dgl.dir\Release\graph_op.obj dgl.dir\Release\graph_traversal.obj dgl.dir\Release\heterograph.obj dgl.dir\Release\heterograph_capi.obj dgl.dir\Release\immutable_graph.obj dgl.dir\Release\metis_partition.obj dgl.dir\Release\network.obj dgl.dir\Release\nodeflow.obj dgl.dir\Release\pickle.obj dgl.dir\Release\sampler.obj dgl.dir\Release\global_uniform.obj dgl.dir\Release\neighbor.obj dgl.dir\Release\get_node_types_cpu.obj dgl.dir\Release\node2vec.obj dgl.dir\Release\node2vec_cpu.obj dgl.dir\Release\randomwalk_cpu.obj dgl.dir\Release\randomwalk_with_restart_cpu.obj dgl.dir\Release\randomwalks.obj dgl.dir\Release\dglgraph_serialize.obj dgl.dir\Release\graph_serialize.obj dgl.dir\Release\heterograph_serialize.obj dgl.dir\Release\tensor_serialize.obj dgl.dir\Release\zerocopy_serializer.obj dgl.dir\Release\shared_mem_manager.obj dgl.dir\Release\subgraph.obj dgl.dir\Release\compact.obj dgl.dir\Release\line_graph.obj dgl.dir\Release\metis_partition_hetero.obj dgl.dir\Release\partition_hetero.obj dgl.dir\Release\remove_edges.obj dgl.dir\Release\to_bipartite.obj dgl.dir\Release\to_simple.obj dgl.dir\Release\unit_graph.obj dgl.dir\Release\scheduler.obj dgl.dir\Release\scheduler_apis.obj dgl.dir\Release\common.obj dgl.dir\Release\msg_queue.obj dgl.dir\Release\socket_communicator.obj dgl.dir\Release\socket_pool.obj dgl.dir\Release\tcp_socket.obj dgl.dir\Release\/src/array/cpu/traversal.cc.obj dgl.dir\Release\/src/array/union_partition.cc.obj dgl.dir\Release\/src/graph/transform/cpu/knn.cc.obj dgl.dir\Release\/src/graph/transform/knn.cc.obj dgl.dir\Release\/src/graph/transform/union_partition.cc.obj dgl.dir\Release\/src/graph/traversal.cc.obj
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_min_f16.o
  Creating library C:/Jenkins/workspace/dgl_PR-4648/build/Release/dgl.lib and object C:/Jenkins/workspace/dgl_PR-4648/build/Release/dgl.exp
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_min_f32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_min_f64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_min_bf16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_max_i8.o
  dgl.vcxproj -> C:\Jenkins\workspace\dgl_PR-4648\build\Release\dgl.dll
PostBuildEvent:
  setlocal
  "C:\Program Files\CMake\bin\cmake.exe" -E copy C:/Jenkins/workspace/dgl_PR-4648/build/Release/dgl.dll C:/Jenkins/workspace/dgl_PR-4648/build/Release/..
  if %errorlevel% neq 0 goto :cmEnd
  :cmEnd
  endlocal & call :cmErrorLevel %errorlevel% & goto :cmDone
  :cmErrorLevel
  exit /b %1
  :cmDone
  if %errorlevel% neq 0 goto :VCEnd
  :VCEnd
FinalizeBuildStatus:
  Deleting file "dgl.dir\Release\dgl.tlog\unsuccessfulbuild".
  Touching "dgl.dir\Release\dgl.tlog\dgl.lastbuildstate".
18>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj" (default targets).
3>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj.metaproj" (default targets).
1>Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (1) is building "C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj.metaproj" (9) on node 1 (default targets).
9>Project "C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj.metaproj" (9) is building "C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj" (19) on node 3 (default targets).
19>PrepareForBuild:
  Creating directory "runUnitTests.dir\Release\".
  Creating directory "runUnitTests.dir\Release\runUnitTests.tlog\".
InitializeBuildStatus:
  Creating "runUnitTests.dir\Release\runUnitTests.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified.
CustomBuild:
  Building Custom Rule C:/Jenkins/workspace/dgl_PR-4648/CMakeLists.txt
ClCompile:
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\CL.exe /c /I"C:\Jenkins\workspace\dgl_PR-4648\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dlpack\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\xbyak" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\phmap" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\libxsmm\include" /I"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\include" /I"C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest" /nologo /W1 /WX- /diagnostics:column /MP /O2 /Ob2 /D DGL_EXPORTS /D USE_AVX /D USE_LIBXSMM /D DGL_CPU_LLC_SIZE=40000000 /D IDXTYPEWIDTH=64 /D REALTYPEWIDTH=32 /D NDEBUG /D WIN32_LEAN_AND_MEAN /D _CRT_SECURE_NO_WARNINGS /D _SCL_SECURE_NO_WARNINGS /D NOMINMAX /D ENABLE_PARTIAL_FRONTIER=0 /D __USE_XOPEN2K8 /D DMLC_CORE_USE_CMAKE /D DMLC_USE_CXX11=1 /D "CMAKE_INTDIR=\"Release\"" /D _MBCS /Gm- /EHsc /MD /GS /fp:precise /Zc:wchar_t /Zc:forScope /Zc:inline /openmp /Fo"runUnitTests.dir\Release\\" /Fd"runUnitTests.dir\Release\vc142.pdb" /Gd /TP /errorReport:queue "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\graph_index_test.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\message_queue_test.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\socket_communicator_test.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\string_test.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_aten.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_csrmm.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_partition.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_rowwise.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_sampler.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_serialize.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_smart_ptr_serialize.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_spmat_coo.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_spmat_csr.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_spmm.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc" "C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_zerocopy_serialize.cc"
  graph_index_test.cc
  message_queue_test.cc
  socket_communicator_test.cc
  string_test.cc
  test_aten.cc
  test_csrmm.cc
  test_partition.cc
  test_rowwise.cc
  test_sampler.cc
  test_serialize.cc
  test_smart_ptr_serialize.cc
  test_spmat_coo.cc
  test_spmat_csr.cc
  test_spmm.cc
  test_unit_graph.cc
  test_zerocopy_serialize.cc
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_max_u8.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_max_i32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_max_u32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_max_i64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_max_u64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_max_f16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_max_f32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_max_f64.o
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\graph_index_test.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3643): message : see reference to function template instantiation '_OutIt std::_Copy_unchecked(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
  with [ _OutIt=int32_t *, _InIt=const __int64 * ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\graph_index_test.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/array_ops.h(349): message : see reference to function template instantiation '_OutIt std::copy<std::_Vector_const_iterator<std::_Vector_val<std::_Simple_types<_Ty>>>,int32_t*>(_InIt,_InIt,_OutIt)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
  with [ _OutIt=int32_t *, _Ty=int64_t, _InIt=std::_Vector_const_iterator<std::_Vector_val<std::_Simple_types<int64_t>>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\graph_index_test.cc)
C:\Jenkins\workspace\dgl_PR-4648\include\dgl\./aten/csr.h(226): message : see reference to function template instantiation 'dgl::IdArray dgl::aten::VecToIdArray<int64_t>(const std::vector<int64_t,std::allocator<int64_t>> &,uint8_t,DGLContext)' being compiled (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\graph_index_test.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_spmat_coo.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_spmat_csr.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
19>C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc(137,1): warning C4305: 'argument': truncation from 'int' to 'bool' [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc(400): message : see reference to function template instantiation 'void _TestUnitGraph(DGLContext)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
19>C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc(139,1): warning C4305: 'argument': truncation from 'int' to 'bool' [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
19>C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc(142,1): warning C4305: 'argument': truncation from 'int' to 'bool' [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
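warning C4305 reports a value being truncated to bool at an argument position; passing integer literals where a function expects bool parameters is the kind of call that can trigger it. A hypothetical sketch, not the actual test code:

    #include <iostream>

    void CreateGraph(int num_nodes, bool row_sorted, bool col_sorted) {
      std::cout << num_nodes << row_sorted << col_sorted << '\n';
    }

    int main() {
      // Passing int literals into the bool parameters can draw MSVC's
      // C4305 ('argument': truncation from 'int' to 'bool'):
      CreateGraph(4, 1, 0);
      // Spelling the booleans out avoids the truncation warning:
      CreateGraph(4, true, false);
      return 0;
    }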
19>C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc(144,1): warning C4305: 'argument': truncation from 'int' to 'bool' [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_max_bf16.o
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_zerocopy_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
        (followed by the same instantiation chain, here for test_unit_graph.cc)
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_aten.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
        (followed by the same instantiation chain, here for test_aten.cc)
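Note: every C4244 '__int64' to 'int32_t' chain above is the same pattern: dgl::aten::VecToIdArray copies a std::vector<int64_t> into a 32-bit IdArray via std::copy, and the element-wise narrowing happens inside <xutility>. A minimal sketch of the pattern (illustrative only, not DGL's actual code):

    // Hypothetical sketch of MSVC warning C4244: std::copy from a 64-bit
    // vector into a 32-bit buffer narrows every element.
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<int64_t> src = {1, 2, 3};
      int32_t dst[3];

      // Fires C4244 ('=': conversion from 'const __int64' to 'int32_t').
      std::copy(src.begin(), src.end(), dst);

      // Warning-free alternative: make the narrowing explicit.
      std::transform(src.begin(), src.end(), dst,
                     [](int64_t v) { return static_cast<int32_t>(v); });
      return 0;
    }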
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(4088,24): warning C4244: '=': conversion from 'const _Ty' to 'float', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
        with [ _Ty=int ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_sampler.cc)
C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\../../src/random/cpu/sample_utils.h(267): message : see reference to function template instantiation 'void std::fill<std::_Vector_iterator<std::_Vector_val<std::_Simple_types<float>>>,int>(const _FwdIt,const _FwdIt,const int &)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
        with [ _Ty=float, _FwdIt=std::_Vector_iterator<std::_Vector_val<std::_Simple_types<float>>> ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_sampler.cc)
C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\../../src/random/cpu/sample_utils.h(264): message : while compiling class template member function 'void dgl::utils::TreeSampler<Idx,DType>::ResetState(dgl::FloatArray)' [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
        with [ Idx=int64_t, DType=float ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_sampler.cc)
C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\../../src/random/cpu/sample_utils.h(281): message : see reference to function template instantiation 'void dgl::utils::TreeSampler<Idx,DType>::ResetState(dgl::FloatArray)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
        with [ Idx=int64_t, DType=float ] (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_sampler.cc)
C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_sampler.cc(86): message : see reference to class template instantiation 'dgl::utils::TreeSampler<Idx,DType>' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
        with [ Idx=int64_t, DType=float ]
C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_sampler.cc(99): message : see reference to function template instantiation 'void _TestWithoutReplacementOrder(dgl::RandomEngine *)' being compiled [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_premulsum_i8.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_premulsum_u8.o
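Note: the C4244 'int' to 'float' warning above comes from std::fill writing an int constant into a std::vector<float> inside TreeSampler::ResetState. A minimal sketch of the trigger and the fix (illustrative, not the actual sample_utils.h code):

    // Hypothetical sketch of MSVC warning C4244 via std::fill: an int fill
    // value is converted to float on every assignment inside <xutility>.
    #include <algorithm>
    #include <vector>

    int main() {
      std::vector<float> weights(8);
      std::fill(weights.begin(), weights.end(), 0);     // C4244: int -> float
      std::fill(weights.begin(), weights.end(), 0.0f);  // explicit float: clean
      return 0;
    }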
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_sampler.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
        (followed by the same xutility(3643) / array_ops.h(349) / csr.h(226) instantiation chain, here for test_sampler.cc)
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_csrmm.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
        (followed by the same instantiation chain, here for test_csrmm.cc)
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_premulsum_i32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_premulsum_u32.o
19>C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_rowwise.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj]
        (followed by the same instantiation chain, here for test_rowwise.cc)
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_premulsum_i64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_premulsum_u64.o
[ 48%] Linking CXX static library ../../../lib/libgmock_main.a
[ 48%] Built target gmock_main
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_premulsum_f16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_premulsum_f32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_premulsum_f64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_premulsum_bf16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sumpostdiv_i8.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sumpostdiv_u8.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sumpostdiv_i32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sumpostdiv_u32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sumpostdiv_i64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sumpostdiv_u64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sumpostdiv_f16.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sumpostdiv_f32.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sumpostdiv_f64.o
Compiling reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_sumpostdiv_bf16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sum_i8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sum_u8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sum_i32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sum_u32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sum_i64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sum_u64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sum_f16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sum_f32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sum_f64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sum_bf16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_prod_i8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_prod_u8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_prod_i32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_prod_u32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_prod_i64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_prod_u64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_prod_f16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_prod_f32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_prod_f64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_prod_bf16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_min_i8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_min_u8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_min_i32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_min_u32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_min_i64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_min_u64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_min_f16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_min_f32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_min_f64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_min_bf16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_max_i8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_max_u8.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_max_i32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_max_u32.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_max_i64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_max_u64.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_max_f16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_max_f32.o
Link:
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\bin\HostX64\x64\link.exe /ERRORREPORT:QUEUE /OUT:"C:\Jenkins\workspace\dgl_PR-4648\build\Release\runUnitTests.exe" /INCREMENTAL:NO /NOLOGO lib\Release\gtest.lib lib\Release\gtest_main.lib Release\dgl.lib lib\Release\gtest.lib "third_party\dmlc-core\Release\dmlc.lib" kernel32.lib user32.lib gdi32.lib winspool.lib shell32.lib ole32.lib oleaut32.lib uuid.lib comdlg32.lib advapi32.lib /MANIFEST /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /manifest:embed /PDB:"C:/Jenkins/workspace/dgl_PR-4648/build/Release/runUnitTests.pdb" /SUBSYSTEM:CONSOLE /TLBID:1 /DYNAMICBASE /NXCOMPAT /IMPLIB:"C:/Jenkins/workspace/dgl_PR-4648/build/Release/runUnitTests.lib" /MACHINE:X64 /machine:x64 runUnitTests.dir\Release\graph_index_test.obj runUnitTests.dir\Release\message_queue_test.obj runUnitTests.dir\Release\socket_communicator_test.obj runUnitTests.dir\Release\string_test.obj runUnitTests.dir\Release\test_aten.obj runUnitTests.dir\Release\test_csrmm.obj runUnitTests.dir\Release\test_partition.obj runUnitTests.dir\Release\test_rowwise.obj runUnitTests.dir\Release\test_sampler.obj runUnitTests.dir\Release\test_serialize.obj runUnitTests.dir\Release\test_smart_ptr_serialize.obj runUnitTests.dir\Release\test_spmat_coo.obj runUnitTests.dir\Release\test_spmat_csr.obj runUnitTests.dir\Release\test_spmm.obj runUnitTests.dir\Release\test_unit_graph.obj runUnitTests.dir\Release\test_zerocopy_serialize.obj
  Creating library C:/Jenkins/workspace/dgl_PR-4648/build/Release/runUnitTests.lib and object C:/Jenkins/workspace/dgl_PR-4648/build/Release/runUnitTests.exp
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_max_f64.o
  runUnitTests.vcxproj -> C:\Jenkins\workspace\dgl_PR-4648\build\Release\runUnitTests.exe
FinalizeBuildStatus:
  Deleting file "runUnitTests.dir\Release\runUnitTests.tlog\unsuccessfulbuild".
  Touching "runUnitTests.dir\Release\runUnitTests.tlog\runUnitTests.lastbuildstate".
19>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj" (default targets).
9>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj.metaproj" (default targets).
2>Project "C:\Jenkins\workspace\dgl_PR-4648\build\ALL_BUILD.vcxproj.metaproj" (2) is building "C:\Jenkins\workspace\dgl_PR-4648\build\ALL_BUILD.vcxproj" (20) on node 4 (default targets).
20>PrepareForBuild:
  Creating directory "x64\Release\ALL_BUILD\".
  Creating directory "x64\Release\ALL_BUILD\ALL_BUILD.tlog\".
InitializeBuildStatus:
  Creating "x64\Release\ALL_BUILD\ALL_BUILD.tlog\unsuccessfulbuild" because "AlwaysCreate" was specified.
CustomBuild:
  Building Custom Rule C:/Jenkins/workspace/dgl_PR-4648/CMakeLists.txt
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_max_bf16.o
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_premulsum_i8.o
FinalizeBuildStatus:
  Deleting file "x64\Release\ALL_BUILD\ALL_BUILD.tlog\unsuccessfulbuild".
  Touching "x64\Release\ALL_BUILD\ALL_BUILD.tlog\ALL_BUILD.lastbuildstate".
20>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\ALL_BUILD.vcxproj" (default targets).
2>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\ALL_BUILD.vcxproj.metaproj" (default targets).
1>Done Building Project "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (default targets).

Build succeeded.

"C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (default target) (1) ->
"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj.metaproj" (default target) (4) ->
"C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj" (default target) (16) ->
(ClCompile target) ->
  C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\include\dmlc/filesystem.h(19,1): warning C4005: 'NOMINMAX': macro redefinition [C:\Jenkins\workspace\dgl_PR-4648\build\third_party\dmlc-core\dmlc.vcxproj], reported once per compiled source file:
      C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\indexed_recordio_split.cc
      C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\recordio_split.cc
      C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\line_split.cc
      C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\filesys.cc
      C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\input_split_base.cc
      C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io.cc
      C:\Jenkins\workspace\dgl_PR-4648\third_party\dmlc-core\src\io\local_filesys.cc
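Note: the C4005 'NOMINMAX' redefinitions mean the macro is defined both on the compiler command line and in dmlc/filesystem.h. The conventional fix is to guard the in-source define; a minimal sketch (illustrative, not the dmlc-core source):

    // Hypothetical sketch of silencing C4005: only define NOMINMAX when the
    // build system has not already passed /DNOMINMAX.
    #ifndef NOMINMAX
    #define NOMINMAX
    #endif
    #include <windows.h>  // min/max macros stay disabled, and no C4005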

"C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (default target) (1) ->
"C:\Jenkins\workspace\dgl_PR-4648\build\tensoradapter_pytorch.vcxproj.metaproj" (default target) (10) ->
"C:\Jenkins\workspace\dgl_PR-4648\build\tensoradapter_pytorch.vcxproj" (default target) (12) ->
(CustomBuild target) ->
  CUSTOMBUILD : CMake warning : [C:\Jenkins\workspace\dgl_PR-4648\build\tensoradapter_pytorch.vcxproj]

"C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (default target) (1) ->
"C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj.metaproj" (default target) (3) ->
"C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj" (default target) (18) ->
(ClCompile target) ->
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj], reported once per compiled source file:
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_nonzero.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_cumsum.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_index_select.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_scatter.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_coalesce.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_sort.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_repeat.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_linegraph.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_pack.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\array_op_impl.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\gather_mm.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_sort.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_to_simple.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_union.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\disjoint_union.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_sort.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\rowwise_topk.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\segment_reduce.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\negative_sampling.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\filter.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\rowwise_sampling.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\array_arith.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_get_data.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\coo_remove.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_remove.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_mm.cc
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(151,1): warning C4267: 'initializing': conversion from 'size_t' to '_Ty2', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] (reported twice in this block)
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(4088,24): warning C4244: '=': conversion from 'const _Ty' to 'IdType', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
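Note: the C4267 at utility(151) fires inside std::pair's converting constructor, typically when a pair deduced with a size_t member is narrowed into a pair of 32-bit ints. A minimal sketch (illustrative, not the DGL call site):

    // Hypothetical sketch of C4267: make_pair deduces pair<size_t, int>,
    // and converting it to pair<int32_t, int32_t> narrows inside <utility>.
    #include <cstdint>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<int> v(100);
      std::pair<int32_t, int32_t> p = std::make_pair(v.size(), 0);          // C4267
      std::pair<int32_t, int32_t> q = {static_cast<int32_t>(v.size()), 0};  // clean
      return p.first == q.first ? 0 : 1;
    }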
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj], reported once per compiled source file:
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\csr_sum.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\array.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_csr.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmm.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\sddmm.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\cpu\spmat_op_impl_coo.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\uvm_array.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\c_api_common.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\geometry\cpu\geometry_op_impl.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\partition\ndarray_partition.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\geometry\geometry.cc
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(151,1): warning C4267: 'initializing': conversion from 'size_t' to '_Ty2', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] (reported twice in this block)
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\numeric(817,20): warning C4244: '=': conversion from '_Ty' to 'IdType', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(151,1): warning C4244: 'initializing': conversion from '_Ty' to '_Ty2', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(396,16): warning C4477: 'fscanf' : format string '%ld' requires an argument of type 'long *', but variadic argument 1 has type 'int64_t *' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(396,16): warning C4477: 'fscanf' : format string '%ld' requires an argument of type 'long *', but variadic argument 2 has type 'int64_t *' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): warning C4477: 'fprintf' : format string '%ld' requires an argument of type 'long', but variadic argument 1 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): warning C4477: 'fprintf' : format string '%ld' requires an argument of type 'long', but variadic argument 2 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(273,21): warning C4477: 'fprintf' : format string '%ld' requires an argument of type 'long', but variadic argument 3 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(284,10): warning C4477: 'printf' : format string '%ld' requires an argument of type 'long', but variadic argument 1 has type 'unsigned __int64' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(287,17): warning C4477: 'fprintf' : format string '%ld' requires an argument of type 'long', but variadic argument 1 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(291,12): warning C4477: 'printf' : format string '%ld' requires an argument of type 'long', but variadic argument 1 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc(296,12): warning C4477: 'printf' : format string '%ld' requires an argument of type 'long', but variadic argument 1 has type 'int64_t' [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
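Note: the C4477 warnings in libra_partition.cc are real portability issues on Windows: '%ld' means 'long', which is 32-bit under MSVC, while the arguments are int64_t. The portable spelling is the PRId64 macro from <cinttypes>. A minimal sketch (the variable name is illustrative):

    // Hypothetical sketch of the C4477 pattern and its portable fix.
    #include <cinttypes>
    #include <cstdio>

    int main() {
      int64_t node_id = 42;
      std::printf("%ld\n", node_id);          // C4477: 'long' vs 'int64_t'
      std::printf("%" PRId64 "\n", node_id);  // correct on all platforms
      return 0;
    }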
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj], reported once per compiled source file:
      C:\Jenkins\workspace\dgl_PR-4648\src\random\random.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\random\cpu\choice.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\kernel.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\array\libra_partition.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\runtime\utils.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\creators.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\gk_ops.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_traversal.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\get_node_types_cpu.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\nodeflow.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_apis.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\negative\global_uniform.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\heterograph.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\immutable_graph.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\node2vec.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\neighbor\neighbor.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\node2vec_cpu.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\pickle.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\graph_op.cc
      C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\randomwalk_with_restart_cpu.cc
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const unsigned __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj], reported while compiling heterograph.cc and graph_op.cc
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(4088,24): warning C4244: '=': conversion from 'const _Ty' to 'float', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\stdint.h(51,1): warning C4005: 'INT32_MIN': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\stdint.h(52,1): warning C4005: 'INT64_MIN': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\stdint.h(55,1): warning C4005: 'INT32_MAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\stdint.h(56,1): warning C4005: 'INT64_MAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\metis_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampler.cc(1526,1): warning C4805: '==': unsafe mix of type 'int64_t' and type 'bool' in operation [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampler.cc(1751,1): warning C4805: '==': unsafe mix of type 'int64_t' and type 'bool' in operation [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xstddef(240,54): warning C4018: '<': signed/unsigned mismatch (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\node2vec_cpu.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
  C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xstddef(242,43): warning C4018: '<': signed/unsigned mismatch (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\node2vec_cpu.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj]
[C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\randomwalks.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\network.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\tensor_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\heterograph_capi.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampling\randomwalks\randomwalk_cpu.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(207,16): warning C4244: 'initializing': conversion from '_Ty' to '_Ty1', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\dglgraph_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampler.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,1): warning C4267: '=': conversion from 'size_t' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\sampler.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\graph_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(75,1): warning C4005: 'INT32_MIN': macro redefinition 
(compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\metis_partition_hetero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(76,1): warning C4005: 'INT32_MAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\metis_partition_hetero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(77,1): warning C4005: 'INT64_MIN': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\metis_partition_hetero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Jenkins\workspace\dgl_PR-4648\third_party\METIS\include\metis.h(78,1): warning C4005: 'INT64_MAX': macro redefinition (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\metis_partition_hetero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\serialize\heterograph_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\shared_mem_manager.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\subgraph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\metis_partition_hetero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\line_graph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\remove_edges.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\tuple(632,57): warning C4244: '=': conversion from '_Ty' to 'IdType', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\tuple(633,58): warning C4244: '=': conversion from '_Ty' to 'IdType', possible loss of data 
[C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(151,1): warning C4244: 'initializing': conversion from '_Ty' to '_Ty2', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\compact.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\to_simple.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\partition_hetero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const unsigned __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\partition_hetero.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\scheduler\scheduler_apis.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\unit_graph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\utility(151,1): warning C4267: 'initializing': conversion from 'size_t' to '_Ty2', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\graph\transform\to_bipartite.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\src\rpc\network\socket_communicator.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (default target) (1) -> "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj.metaproj" (default target) (3) -> 
"C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj" (default target) (18) -> C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (default target) (1) -> "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj.metaproj" (default target) (3) -> "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj" (default target) (18) -> C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (default target) (1) -> "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj.metaproj" (default target) (3) -> "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj" (default target) (18) -> C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (default target) (1) -> "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj.metaproj" (default target) (3) -> "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj" (default target) (18) -> C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (default target) (1) -> "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj.metaproj" (default target) (3) -> "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj" (default target) (18) -> C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (default target) (1) -> "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj.metaproj" (default target) (3) -> "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj" (default target) (18) -> C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\dgl.vcxproj] "C:\Jenkins\workspace\dgl_PR-4648\build\dgl.sln" (default target) (1) -> "C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj.metaproj" (default target) (9) -> "C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj" (default target) (19) -> C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\graph_index_test.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': 
conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_spmat_coo.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_partition.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_spmat_csr.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc(137,1): warning C4305: 'argument': truncation from 'int' to 'bool' [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc(139,1): warning C4305: 'argument': truncation from 'int' to 'bool' [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc(142,1): warning C4305: 'argument': truncation from 'int' to 'bool' [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc(144,1): warning C4305: 'argument': truncation from 'int' to 'bool' [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_zerocopy_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_serialize.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_unit_graph.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_aten.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(4088,24): warning C4244: '=': conversion from 'const _Ty' to 'float', possible loss of data [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data 
(compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_sampler.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_csrmm.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.25.28610\include\xutility(3606,22): warning C4244: '=': conversion from 'const __int64' to 'int32_t', possible loss of data (compiling source file C:\Jenkins\workspace\dgl_PR-4648\tests\cpp\test_rowwise.cc) [C:\Jenkins\workspace\dgl_PR-4648\build\runUnitTests.vcxproj] 148 Warning(s) 0 Error(s) Time Elapsed 00:00:44.54 1 file(s) copied. Could Not Find C:\Jenkins\workspace\dgl_PR-4648\python\build nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_premulsum_u8.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_premulsum_i32.o Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_premulsum_u32.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_premulsum_i64.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_premulsum_u64.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_premulsum_f16.o Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_premulsum_f32.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
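The MSVC build above finishes with 148 warnings and 0 errors; the bulk are C4244 narrowing conversions from 64-bit to 32-bit integers, plus the C4477 format-string mismatches in src\array\libra_partition.cc ('%ld' paired with int64_t arguments). A minimal, self-contained sketch of both patterns and their conventional fixes; the names and values here are illustrative, not DGL's actual code:

    // C4477: on LLP64 Windows, 'long' is 32-bit, so "%ld" does not match
    // int64_t; the PRId64 macro from <cinttypes> expands to the correct
    // specifier on every platform.
    // C4244: an implicit __int64 -> int32_t assignment warns; an explicit
    // static_cast documents that the truncation is intentional.
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int64_t num_edges = 5000000000;            // does not fit in 32 bits
      std::printf("edges: %" PRId64 "\n", num_edges);  // portable, no C4477
      const int32_t bucket = static_cast<int32_t>(num_edges % 1024);  // no C4244
      std::printf("bucket: %d\n", bucket);
      return 0;
    }
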
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Obtaining file:///C:/Jenkins/workspace/dgl_PR-4648/python Preparing metadata (setup.py): started Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_premulsum_f64.o Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_premulsum_bf16.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_i8.o Preparing metadata (setup.py): finished with status 'done' Requirement already satisfied: numpy>=1.14.0 in c:\program files\python36\lib\site-packages (from dgl==0.9) (1.19.5) Requirement already satisfied: scipy>=1.1.0 in c:\program files\python36\lib\site-packages (from dgl==0.9) (1.2.0) Requirement already satisfied: networkx>=2.1 in c:\program files\python36\lib\site-packages (from dgl==0.9) (2.2) Requirement already satisfied: requests>=2.19.0 in c:\program files\python36\lib\site-packages (from dgl==0.9) (2.24.0) Requirement already satisfied: tqdm in c:\program files\python36\lib\site-packages (from dgl==0.9) (4.58.0) Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_u8.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_i32.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_u32.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
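Earlier in the same MSVC output, src\graph\sampler.cc lines 1526 and 1751 trip C4805, an '==' mixing int64_t with bool. A hedged illustration of the pattern and one warning-free rewrite, again with made-up names rather than the code actually flagged here:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int64_t num_picks = 1;
      const bool replace = true;
      // if (num_picks == replace) { ... }  // raises C4805 under MSVC
      if ((num_picks != 0) == replace) {    // compare like types instead
        std::printf("consistent\n");
      }
      return 0;
    }
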
Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_i64.o Collecting psutil>=5.8.0 Using cached psutil-5.9.2-cp36-cp36m-win_amd64.whl (248 kB) Requirement already satisfied: decorator>=4.3.0 in c:\program files\python36\lib\site-packages (from networkx>=2.1->dgl==0.9) (4.4.2) Requirement already satisfied: chardet<4,>=3.0.2 in c:\program files\python36\lib\site-packages (from requests>=2.19.0->dgl==0.9) (3.0.4) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in c:\program files\python36\lib\site-packages (from requests>=2.19.0->dgl==0.9) (1.25.10) Requirement already satisfied: idna<3,>=2.5 in c:\program files\python36\lib\site-packages (from requests>=2.19.0->dgl==0.9) (2.10) Requirement already satisfied: certifi>=2017.4.17 in c:\program files\python36\lib\site-packages (from requests>=2.19.0->dgl==0.9) (2020.6.20) Installing collected packages: psutil, dgl nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_u64.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_f16.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Running setup.py develop for dgl nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_f32.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_f64.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Compiling reduce_scatter.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/reduce_scatter_sumpostdiv_bf16.o Compiling functions.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/functions.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
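The runUnitTests project above also reports C4305 at test_unit_graph.cc lines 137-144: integer literals truncated to bool arguments. A small reproduction; MakeTestGraph is a hypothetical stand-in for the bool-taking helpers in that test, not DGL's real API:

    #include <cstdio>

    // Hypothetical helper; the second parameter mirrors the bool
    // arguments flagged in test_unit_graph.cc.
    static void MakeTestGraph(int num_nodes, bool row_sorted) {
      std::printf("nodes=%d row_sorted=%d\n", num_nodes, row_sorted);
    }

    int main() {
      MakeTestGraph(10, 1);     // warning C4305: 'argument': truncation from 'int' to 'bool'
      MakeTestGraph(10, true);  // identical behavior, no warning
      return 0;
    }
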
Successfully installed dgl psutil-5.9.2 [Pipeline] echo Packing build\dgl.dll, build\runUnitTests.exe, build\tensoradapter\pytorch\*.dll into dgl-cpu-win64 Compiling onerank_reduce.cu > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/onerank_reduce.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). [Pipeline] stash Stashed 3 file(s) Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration... /usr/bin/ar: creating lib/libxsmm.a /usr/bin/ar: creating lib/libxsmmext.a /usr/bin/ar: creating lib/libxsmm.a /usr/bin/ar: creating lib/libxsmmext.a ================================================================================ LIBXSMM master-1.16.1-1534 (Linux@4e33dd6e703e) -------------------------------------------------------------------------------- GNU Compiler Collection: gcc 7.5.0, and g++ 7.5.0 C / C++ target: -msse4.2 Fortran Compiler is disabled or missing: no Fortran interface is built! -------------------------------------------------------------------------------- BLAS dependency and fallback is removed! -------------------------------------------------------------------------------- [ 49%] Built target libxsmm ================================================================================ LIBXSMM master-1.16.1-1534 (Linux@681e11859cd1) -------------------------------------------------------------------------------- GNU Compiler Collection: gcc 7.5.0, and g++ 7.5.0 C / C++ target: -msse4.2 Fortran Compiler is disabled or missing: no Fortran interface is built! -------------------------------------------------------------------------------- BLAS dependency and fallback is removed! 
-------------------------------------------------------------------------------- [ 48%] Built target libxsmm [WS-CLEANUP] done [Pipeline] } [Pipeline] // withEnv [Pipeline] } [Pipeline] // node [Pipeline] } [Pipeline] // stage [Pipeline] } [ 92%] Linking CXX shared library libdgl.so [ 92%] Built target dgl Scanning dependencies of target rpc_client Scanning dependencies of target rpc_server [ 93%] Building CXX object CMakeFiles/rpc_client.dir/tests/dist/cpp/rpc_client.cc.o [ 93%] Building CXX object CMakeFiles/rpc_server.dir/tests/dist/cpp/rpc_server.cc.o Scanning dependencies of target runUnitTests [ 95%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/graph_index_test.cc.o [ 95%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_csrmm.cc.o [ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_partition.cc.o [ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_aten.cc.o [ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_spmat_coo.cc.o [ 98%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_rowwise.cc.o [ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_spmm.cc.o [ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/socket_communicator_test.cc.o [ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_spmat_csr.cc.o [100%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_zerocopy_serialize.cc.o [100%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_serialize.cc.o [ 98%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/message_queue_test.cc.o [ 98%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_unit_graph.cc.o [ 98%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/string_test.cc.o [ 98%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_sampler.cc.o [ 98%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_smart_ptr_serialize.cc.o /root/jenkins/workspace/dgl_PR-4648/tests/dist/cpp/rpc_client.cc: In member function 'void RPCClient::StartClient()': /root/jenkins/workspace/dgl_PR-4648/tests/dist/cpp/rpc_client.cc:50:15: warning: unused variable 'num_machines' [-Wunused-variable] const int num_machines = ips_.size(); ^~~~~~~~~~~~ [100%] Linking CXX executable rpc_server [100%] Linking CXX executable rpc_client [100%] Built target rpc_server [100%] Built target rpc_client [100%] Linking CXX executable runUnitTests [100%] Built target runUnitTests ~/jenkins/workspace/dgl_PR-4648 ~/jenkins/workspace/dgl_PR-4648/python ~/jenkins/workspace/dgl_PR-4648 WARNING: Skipping dgl as it is not installed. WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv [1/1] Cythonizing dgl/_ffi/_cython/core.pyx running install running bdist_egg running egg_info creating dgl.egg-info writing dgl.egg-info/PKG-INFO writing dependency_links to dgl.egg-info/dependency_links.txt writing requirements to dgl.egg-info/requires.txt writing top-level names to dgl.egg-info/top_level.txt writing manifest file 'dgl.egg-info/SOURCES.txt' reading manifest file 'dgl.egg-info/SOURCES.txt' writing manifest file 'dgl.egg-info/SOURCES.txt' installing library code to build/bdist.linux-x86_64/egg running install_lib running build_py creating build creating build/lib.linux-x86_64-3.7 creating build/lib.linux-x86_64-3.7/dgl copying dgl/partition.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/core.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/subgraph.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/traversal.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/base.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/heterograph_index.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/sparse.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/global_config.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/_api_internal.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/heterograph.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/network.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/logging.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/graph_index.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/udf.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/ndarray.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/init.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/view.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/generators.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/convert.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/container.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/merge.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/readout.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/__init__.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/propagate.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/batch.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/frame.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/random.py -> build/lib.linux-x86_64-3.7/dgl creating build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/sp_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/diag_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/reduction.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/__init__.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/elementwise_op_sp.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse creating build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/edge_coarsening.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/fps.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/capi.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/__init__.py -> build/lib.linux-x86_64-3.7/dgl/geometry creating build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/base.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/message.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/reducer.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/__init__.py -> build/lib.linux-x86_64-3.7/dgl/function creating build/lib.linux-x86_64-3.7/dgl/nn copying dgl/nn/__init__.py -> 
build/lib.linux-x86_64-3.7/dgl/nn creating build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/dis_kvstore.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/unified_tensor.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/graph_store.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib creating build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/checks.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/exception.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/pin_memory.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/internal.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/filter.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/data.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/__init__.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/shared_mem.py -> build/lib.linux-x86_64-3.7/dgl/utils creating build/lib.linux-x86_64-3.7/dgl/multiprocessing copying dgl/multiprocessing/pytorch.py -> build/lib.linux-x86_64-3.7/dgl/multiprocessing copying dgl/multiprocessing/__init__.py -> build/lib.linux-x86_64-3.7/dgl/multiprocessing creating build/lib.linux-x86_64-3.7/dgl/optim copying dgl/optim/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim creating build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/base.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/graphsaint.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/shadow.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/negative_sampler.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/__init__.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/cluster_gcn.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/neighbor_sampler.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/dist_dataloader.py -> build/lib.linux-x86_64-3.7/dgl/dataloading creating build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/neighbor.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/shadow.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/negative_sampler.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/cluster_gcn.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading creating build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/backend.py -> build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/set_default_backend.py -> build/lib.linux-x86_64-3.7/dgl/backend creating build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/edge_softmax.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/sddmm.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/gather_mm.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/segment.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/__init__.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/spmm.py -> build/lib.linux-x86_64-3.7/dgl/ops creating build/lib.linux-x86_64-3.7/dgl/cuda copying dgl/cuda/nccl.py -> build/lib.linux-x86_64-3.7/dgl/cuda 
copying dgl/cuda/__init__.py -> build/lib.linux-x86_64-3.7/dgl/cuda creating build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/pytorch_tensor.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/base.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/tensor.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/__init__.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/numpy.py -> build/lib.linux-x86_64-3.7/dgl/storages creating build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/neighbor.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/pinsage.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/negative.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/randomwalks.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/utils.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/node2vec_randomwalk.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/__init__.py -> build/lib.linux-x86_64-3.7/dgl/sampling creating build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm7b.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/graph_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/dgl_dataset.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/csv_dataset_base.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/knowledge_graph.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm9_edge.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gnn_benchmark.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm9.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/icews18.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gdelt.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/utils.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/sbm.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/csv_dataset.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/karate.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/citation_graph.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/rdf.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/bitcoinotc.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/ppi.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tensor_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/flickr.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/wikics.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/heterograph_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tu.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/__init__.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gindt.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tree.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/synthetic.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/adapter.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/fakenews.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/fraud.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/minigc.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/yelp.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/reddit.py -> build/lib.linux-x86_64-3.7/dgl/data creating build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_graph.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying 
dgl/distributed/graph_partition_book.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/partition.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/standalone_kvstore.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/graph_services.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/role.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/constants.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc_server.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/kvstore.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_context.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc_client.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/id_map.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/server_state.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/shared_mem_utils.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_tensor.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_dataloader.py -> build/lib.linux-x86_64-3.7/dgl/distributed creating build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/nodeflow.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/udf.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/view.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/kernel.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/graph.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/frame.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate creating build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/functional.py -> build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/__init__.py -> build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/module.py -> build/lib.linux-x86_64-3.7/dgl/transforms creating build/lib.linux-x86_64-3.7/dgl/distgnn copying dgl/distgnn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn creating build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/runtime_ctypes.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/function.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/libinfo.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/streams.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/base.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/ndarray.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/object_generic.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/object.py -> build/lib.linux-x86_64-3.7/dgl/_ffi creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/linear.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying 
dgl/nn/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/factory.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow creating build/lib.linux-x86_64-3.7/dgl/nn/functional copying dgl/nn/functional/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/functional creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/transe.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/edgepred.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/transr.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/agnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/dgnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/grouprevres.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/twirlsconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/egatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gcn2conv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/dotgatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/cfconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gmmconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densesageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gineconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/nnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying 
dgl/nn/pytorch/conv/gatv2conv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/hgtconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/pnaconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gatedgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/atomicconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/tagconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densegraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/egnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain copying dgl/nn/pytorch/explain/gnnexplainer.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain copying dgl/nn/pytorch/explain/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/agnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gmmconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densesageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/nnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gatedgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/tagconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densegraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying 
dgl/nn/tensorflow/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv creating build/lib.linux-x86_64-3.7/dgl/contrib/sampling copying dgl/contrib/sampling/sampler.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling copying dgl/contrib/sampling/dis_sampler.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling copying dgl/contrib/sampling/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling creating build/lib.linux-x86_64-3.7/dgl/contrib/data copying dgl/contrib/data/knowledge_graph.py -> build/lib.linux-x86_64-3.7/dgl/contrib/data copying dgl/contrib/data/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib/data creating build/lib.linux-x86_64-3.7/dgl/optim/pytorch copying dgl/optim/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/pytorch copying dgl/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/optim/pytorch creating build/lib.linux-x86_64-3.7/dgl/optim/mxnet copying dgl/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/mxnet creating build/lib.linux-x86_64-3.7/dgl/optim/tensorflow copying dgl/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/tensorflow creating build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch copying dgl/_dataloading/pytorch/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch copying dgl/_dataloading/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch creating build/lib.linux-x86_64-3.7/dgl/backend/pytorch copying dgl/backend/pytorch/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch copying dgl/backend/pytorch/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch copying dgl/backend/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch creating build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet creating build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow creating build/lib.linux-x86_64-3.7/dgl/distributed/nn copying dgl/distributed/nn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn creating build/lib.linux-x86_64-3.7/dgl/distributed/optim copying dgl/distributed/optim/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch copying dgl/distributed/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch copying dgl/distributed/nn/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch creating 
build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet copying dgl/distributed/nn/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow copying dgl/distributed/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/utils.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet copying dgl/distributed/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow copying dgl/distributed/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/spmv.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/degree_bucketing.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/scheduler.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/adapter.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/runtime.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/program.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/executor.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/var.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/registry.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir creating build/lib.linux-x86_64-3.7/dgl/distgnn/partition copying dgl/distgnn/partition/libra_partition.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/partition copying dgl/distgnn/partition/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/partition creating build/lib.linux-x86_64-3.7/dgl/distgnn/tools copying dgl/distgnn/tools/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/tools copying dgl/distgnn/tools/tools.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/tools creating build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/function.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/types.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/ndarray.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/object.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3 copying dgl/_ffi/_cy3/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3 creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2 copying dgl/_ffi/_cy2/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2 creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cython copying 
dgl/_ffi/_cython/core.cpp -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cython
running build_ext
building 'dgl._ffi._cy3.core' extension
creating build/temp.linux-x86_64-3.7
creating build/temp.linux-x86_64-3.7/dgl
creating build/temp.linux-x86_64-3.7/dgl/_ffi
creating build/temp.linux-x86_64-3.7/dgl/_ffi/_cython
gcc -pthread -B /opt/conda/envs/pytorch-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/pytorch-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o
cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++
g++ -pthread -B /opt/conda/envs/pytorch-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/pytorch-ci/compiler_compat -L/opt/conda/envs/pytorch-ci/lib -Wl,-rpath=/opt/conda/envs/pytorch-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
creating build/bdist.linux-x86_64
creating build/bdist.linux-x86_64/egg
creating build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/sp_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/diag_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/reduction.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/__init__.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/elementwise_op_sp.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
creating build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/edge_coarsening.py -> build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/fps.py -> build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/capi.py -> build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/__init__.py -> build/bdist.linux-x86_64/egg/dgl/geometry
creating build/bdist.linux-x86_64/egg/dgl/function
copying build/lib.linux-x86_64-3.7/dgl/function/base.py -> build/bdist.linux-x86_64/egg/dgl/function
copying build/lib.linux-x86_64-3.7/dgl/function/message.py ->
build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/function/reducer.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/function/__init__.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/partition.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/nn creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/transe.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/edgepred.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/transr.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/sparse_emb.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/linear.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/agnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/dgnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/grouprevres.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/twirlsconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/egatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gcn2conv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/dotgatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/cfconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gmmconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densesageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gineconv.py -> 
build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/nnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatv2conv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/hgtconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/pnaconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatedgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/atomicconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/tagconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densegraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/egnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain/gnnexplainer.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/factory.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/agnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gmmconv.py -> 
build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densesageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/nnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gatedgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/tagconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densegraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/__init__.py -> 
build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/nn/functional copying build/lib.linux-x86_64-3.7/dgl/nn/functional/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/functional copying build/lib.linux-x86_64-3.7/dgl/core.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/subgraph.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/contrib/dis_kvstore.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/contrib/unified_tensor.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/contrib/graph_store.py -> build/bdist.linux-x86_64/egg/dgl/contrib creating build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/sampler.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/dis_sampler.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling creating build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-3.7/dgl/contrib/data/knowledge_graph.py -> build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-3.7/dgl/contrib/data/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-3.7/dgl/contrib/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/traversal.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/checks.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/exception.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/pin_memory.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/internal.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/filter.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/data.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/__init__.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/shared_mem.py -> build/bdist.linux-x86_64/egg/dgl/utils creating build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-3.7/dgl/multiprocessing/pytorch.py -> build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-3.7/dgl/multiprocessing/__init__.py -> build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-3.7/dgl/base.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/optim creating build/bdist.linux-x86_64/egg/dgl/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/optim/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/optim/pytorch/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/optim/pytorch creating build/bdist.linux-x86_64/egg/dgl/optim/mxnet copying build/lib.linux-x86_64-3.7/dgl/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/mxnet copying build/lib.linux-x86_64-3.7/dgl/optim/__init__.py -> 
build/bdist.linux-x86_64/egg/dgl/optim creating build/bdist.linux-x86_64/egg/dgl/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/heterograph_index.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/sparse.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/global_config.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/base.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/graphsaint.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/shadow.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/negative_sampler.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/__init__.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/cluster_gcn.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/neighbor_sampler.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/dist_dataloader.py -> build/bdist.linux-x86_64/egg/dgl/dataloading creating build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/neighbor.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading creating build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying build/lib.linux-x86_64-3.7/dgl/_dataloading/shadow.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/negative_sampler.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/cluster_gcn.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_api_internal.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/heterograph.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/backend copying build/lib.linux-x86_64-3.7/dgl/backend/backend.py -> build/bdist.linux-x86_64/egg/dgl/backend creating build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch creating build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet 
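The dgl/backend/pytorch, dgl/backend/mxnet and dgl/backend/tensorflow trees staged above hold the per-framework implementations behind DGL's pluggable backend. As a minimal sketch only (not DGL's actual loader in dgl/backend/__init__.py, whose logic may differ), a dispatcher of this shape is typically resolved at import time from the DGLBACKEND environment variable:

    import importlib
    import os

    def load_backend(default="pytorch"):
        # Hypothetical illustration: read the framework name from
        # DGLBACKEND (falling back to PyTorch) and import the matching
        # submodule, e.g. dgl.backend.pytorch or dgl.backend.tensorflow.
        name = os.environ.get("DGLBACKEND", default)
        return importlib.import_module("dgl.backend." + name)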
copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend creating build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/set_default_backend.py -> build/bdist.linux-x86_64/egg/dgl/backend copying build/lib.linux-x86_64-3.7/dgl/network.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/edge_softmax.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/sddmm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/gather_mm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/segment.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/__init__.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/spmm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/logging.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/graph_index.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-3.7/dgl/cuda/nccl.py -> build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-3.7/dgl/cuda/__init__.py -> build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-3.7/dgl/udf.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/pytorch_tensor.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/base.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/tensor.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/__init__.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/numpy.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/ndarray.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/init.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/view.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/neighbor.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/pinsage.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/negative.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying 
build/lib.linux-x86_64-3.7/dgl/sampling/randomwalks.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/utils.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/node2vec_randomwalk.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/__init__.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/generators.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/qm7b.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/graph_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/dgl_dataset.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/csv_dataset_base.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/knowledge_graph.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/qm9_edge.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/gnn_benchmark.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/qm9.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/icews18.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/gdelt.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/utils.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/sbm.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/csv_dataset.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/karate.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/citation_graph.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/rdf.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/bitcoinotc.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/ppi.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/tensor_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/flickr.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/wikics.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/heterograph_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/tu.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/__init__.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/gindt.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/tree.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/synthetic.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/adapter.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/fakenews.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/fraud.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/minigc.py -> 
build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/yelp.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/reddit.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/convert.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/container.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/merge.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/readout.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/__init__.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_graph.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/graph_partition_book.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/partition.py -> build/bdist.linux-x86_64/egg/dgl/distributed creating build/bdist.linux-x86_64/egg/dgl/distributed/nn creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch/sparse_emb.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/distributed/optim creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/utils.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/distributed/standalone_kvstore.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/graph_services.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/role.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/constants.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying 
build/lib.linux-x86_64-3.7/dgl/distributed/rpc_server.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/kvstore.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_context.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/rpc.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/rpc_client.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/id_map.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/server_state.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/shared_mem_utils.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_tensor.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_dataloader.py -> build/bdist.linux-x86_64/egg/dgl/distributed creating build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/nodeflow.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/udf.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/view.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/kernel.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/spmv.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/degree_bucketing.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/scheduler.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/adapter.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/runtime.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/program.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/executor.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/var.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/registry.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/graph.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/frame.py -> 
build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/propagate.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/batch.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-3.7/dgl/transforms/functional.py -> build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-3.7/dgl/transforms/__init__.py -> build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-3.7/dgl/transforms/module.py -> build/bdist.linux-x86_64/egg/dgl/transforms creating build/bdist.linux-x86_64/egg/dgl/distgnn creating build/bdist.linux-x86_64/egg/dgl/distgnn/partition copying build/lib.linux-x86_64-3.7/dgl/distgnn/partition/libra_partition.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/partition copying build/lib.linux-x86_64-3.7/dgl/distgnn/partition/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/partition creating build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-3.7/dgl/distgnn/tools/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-3.7/dgl/distgnn/tools/tools.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-3.7/dgl/distgnn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn creating build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/runtime_ctypes.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/function.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/libinfo.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/streams.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/base.py -> build/bdist.linux-x86_64/egg/dgl/_ffi creating build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/function.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/types.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/ndarray.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/object.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/ndarray.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/object_generic.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/object.py -> build/bdist.linux-x86_64/egg/dgl/_ffi creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3 copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3 copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3 creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2 copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2 creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cython copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cython/core.cpp -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cython copying build/lib.linux-x86_64-3.7/dgl/frame.py -> 
build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/random.py -> build/bdist.linux-x86_64/egg/dgl byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/sp_matrix.py to sp_matrix.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/diag_matrix.py to diag_matrix.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/reduction.py to reduction.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/elementwise_op_sp.py to elementwise_op_sp.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/edge_coarsening.py to edge_coarsening.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/fps.py to fps.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/capi.py to capi.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/message.py to message.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/reducer.py to reducer.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/partition.py to partition.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/glob.py to glob.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transe.py to transe.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/edgepred.py to edgepred.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transr.py to transr.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/hetero.py to hetero.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/sparse_emb.py to sparse_emb.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/linear.py to linear.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/agnnconv.py to agnnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/relgraphconv.py to relgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/appnpconv.py to appnpconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dgnconv.py to dgnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/grouprevres.py to grouprevres.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/twirlsconv.py to twirlsconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egatconv.py to egatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/graphconv.py to graphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densechebconv.py to densechebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gcn2conv.py to gcn2conv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/ginconv.py to ginconv.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dotgatconv.py to dotgatconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/cfconv.py to cfconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gmmconv.py to gmmconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sageconv.py to sageconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densesageconv.py to densesageconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gineconv.py to gineconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/edgeconv.py to edgeconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/nnconv.py to nnconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatv2conv.py to gatv2conv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/hgtconv.py to hgtconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/pnaconv.py to pnaconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatedgraphconv.py to gatedgraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/atomicconv.py to atomicconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/tagconv.py to tagconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densegraphconv.py to densegraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egnnconv.py to egnnconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/chebconv.py to chebconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sgconv.py to sgconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatconv.py to gatconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/gnnexplainer.py to gnnexplainer.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/factory.py to factory.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/softmax.py to softmax.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/glob.py to glob.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/hetero.py to hetero.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/utils.py to utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/agnnconv.py to agnnconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/relgraphconv.py to relgraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/appnpconv.py to appnpconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/graphconv.py to graphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densechebconv.py to densechebconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/ginconv.py to ginconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gmmconv.py to gmmconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sageconv.py to sageconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densesageconv.py to densesageconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/edgeconv.py to edgeconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/nnconv.py to nnconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatedgraphconv.py to gatedgraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/tagconv.py to tagconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densegraphconv.py to densegraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/chebconv.py to chebconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sgconv.py to sgconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatconv.py to gatconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/softmax.py to softmax.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/glob.py to glob.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/hetero.py to hetero.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/utils.py to utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/relgraphconv.py to relgraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/appnpconv.py to appnpconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/graphconv.py to graphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/densechebconv.py to densechebconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/ginconv.py to ginconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sageconv.py to sageconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/edgeconv.py to edgeconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/chebconv.py to chebconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sgconv.py to sgconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/gatconv.py to gatconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/softmax.py to softmax.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/functional/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/core.py to core.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/subgraph.py to subgraph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/dis_kvstore.py to dis_kvstore.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/unified_tensor.py to unified_tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/graph_store.py to graph_store.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/sampler.py to sampler.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/dis_sampler.py to dis_sampler.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/traversal.py to traversal.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/checks.py to checks.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/exception.py to exception.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/pin_memory.py to pin_memory.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/internal.py to internal.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/filter.py to filter.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/data.py to data.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/shared_mem.py to shared_mem.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/pytorch.py to pytorch.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/base.py to base.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/mxnet/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/tensorflow/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph_index.py to heterograph_index.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sparse.py to sparse.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/global_config.py to global_config.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/base.py to base.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dataloader.py to dataloader.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/graphsaint.py to graphsaint.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/shadow.py to shadow.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/neighbor_sampler.py to neighbor_sampler.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dist_dataloader.py to dist_dataloader.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/neighbor.py to neighbor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/dataloader.py to dataloader.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/dataloader.py to dataloader.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/shadow.py to shadow.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_api_internal.py to _api_internal.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph.py to heterograph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/backend.py to backend.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/sparse.py to sparse.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/tensor.py to tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse.py to sparse.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/tensor.py to tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse_optim.py to sparse_optim.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse.py to sparse.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/tensor.py to tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse_optim.py to sparse_optim.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/set_default_backend.py to set_default_backend.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/network.py to network.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/edge_softmax.py to edge_softmax.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/sddmm.py to sddmm.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/gather_mm.py to gather_mm.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/segment.py to segment.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/spmm.py to spmm.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/logging.py to logging.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/graph_index.py to graph_index.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/nccl.py to nccl.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/udf.py to udf.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/pytorch_tensor.py to pytorch_tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/base.py to base.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/tensor.py to tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/numpy.py to numpy.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ndarray.py to ndarray.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/init.py to init.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/view.py to view.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/neighbor.py to neighbor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/pinsage.py to pinsage.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/negative.py to negative.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/randomwalks.py to randomwalks.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/utils.py to utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/node2vec_randomwalk.py to node2vec_randomwalk.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/generators.py to generators.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm7b.py to qm7b.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/graph_serialize.py to graph_serialize.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/dgl_dataset.py to dgl_dataset.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset_base.py to csv_dataset_base.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9_edge.py to qm9_edge.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gnn_benchmark.py to gnn_benchmark.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9.py to qm9.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/icews18.py to icews18.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gdelt.py to gdelt.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/utils.py to utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/sbm.py to sbm.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset.py to csv_dataset.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/karate.py to karate.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/citation_graph.py to citation_graph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/rdf.py to rdf.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/bitcoinotc.py to bitcoinotc.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/ppi.py to ppi.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tensor_serialize.py to tensor_serialize.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/flickr.py to flickr.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/wikics.py to wikics.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/heterograph_serialize.py to heterograph_serialize.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tu.py to tu.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gindt.py to gindt.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tree.py to tree.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/synthetic.py to synthetic.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/adapter.py to adapter.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fakenews.py to fakenews.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fraud.py to fraud.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/minigc.py to minigc.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/yelp.py to yelp.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/reddit.py to reddit.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/convert.py to convert.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/container.py to container.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/merge.py to merge.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/readout.py to readout.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_graph.py to dist_graph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_partition_book.py to graph_partition_book.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/partition.py to partition.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/sparse_emb.py to sparse_emb.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/utils.py to utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/standalone_kvstore.py to standalone_kvstore.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_services.py to graph_services.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/role.py to role.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/constants.py to constants.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_server.py to rpc_server.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/kvstore.py to kvstore.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_context.py to dist_context.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc.py to rpc.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_client.py to rpc_client.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/id_map.py to id_map.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/server_state.py to server_state.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/shared_mem_utils.py to shared_mem_utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_tensor.py to dist_tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_dataloader.py to dist_dataloader.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/nodeflow.py to nodeflow.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/udf.py to udf.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/view.py to view.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/kernel.py to kernel.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/spmv.py to spmv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/degree_bucketing.py to degree_bucketing.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/scheduler.py to scheduler.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/adapter.py to adapter.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/runtime.py to runtime.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/program.py to program.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/executor.py to executor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/var.py to var.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/registry.py to registry.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/graph.py to graph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/frame.py to frame.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/propagate.py to propagate.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/batch.py to batch.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/functional.py to functional.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/module.py to module.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/libra_partition.py to libra_partition.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/tools.py to tools.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/runtime_ctypes.py to runtime_ctypes.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/function.py to function.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/libinfo.py to libinfo.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/streams.py to streams.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/base.py to base.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/function.py to function.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/types.py to types.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/ndarray.py to ndarray.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/object.py to object.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/ndarray.py to ndarray.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object_generic.py to object_generic.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object.py to object.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/frame.py to frame.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/random.py to random.cpython-37.pyc
creating stub loader for dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/core.py to core.cpython-37.pyc
installing package data to build/bdist.linux-x86_64/egg
running install_data
copying ../build/libdgl.so -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/tensoradapter
creating build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch
copying ../build/tensoradapter/pytorch/libtensoradapter_pytorch_1.9.0.so -> build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch
creating build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/not-zip-safe -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
writing build/bdist.linux-x86_64/egg/EGG-INFO/native_libs.txt
creating dist
creating 'dist/dgl-0.9-py3.7-linux-x86_64.egg' and adding 'build/bdist.linux-x86_64/egg' to it
removing 'build/bdist.linux-x86_64/egg' (and everything under it)
Processing dgl-0.9-py3.7-linux-x86_64.egg
creating /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg
Extracting dgl-0.9-py3.7-linux-x86_64.egg to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Adding dgl 0.9 to easy-install.pth file
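The egg assembled above is the output of setuptools' bdist_egg machinery followed by an install step. As a hedged sketch only — the real contents of DGL's setup.py are not shown in this log — the kind of setup script that produces output like this looks roughly as follows (name and version match the log; everything else is an assumption):

    from setuptools import setup, find_packages

    # Minimal, hypothetical reconstruction for illustration.
    setup(
        name="dgl",
        version="0.9",
        packages=find_packages(),
        install_requires=["psutil>=5.8.0"],  # the one requirement easy_install resolves below
        zip_safe=False,  # yields the not-zip-safe flag copied into EGG-INFO above
    )

Invoked as `python setup.py install` from the python/ source directory, such a script drives the bdist_egg, Processing, and Extracting steps recorded here.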
Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg
Processing dependencies for dgl==0.9
Searching for psutil>=5.8.0
Reading https://pypi.org/simple/psutil/
Downloading https://files.pythonhosted.org/packages/3d/73/d8c87b5612c58d1e6c6d91997c1590771d34e4ee27d9c11eb1e64ecbf365/psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=4fb54941aac044a61db9d8eb56fc5bee207db3bc58645d657249030e15ba3727
Best match: psutil 5.9.2
Processing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl
Installing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Adding psutil 5.9.2 to easy-install.pth file
Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/psutil-5.9.2-py3.7-linux-x86_64.egg
Searching for tqdm==4.64.0
Best match: tqdm 4.64.0
Adding tqdm 4.64.0 to easy-install.pth file
Installing tqdm script to /opt/conda/envs/pytorch-ci/bin
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for requests==2.28.1
Best match: requests 2.28.1
Adding requests 2.28.1 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for networkx==2.6.3
Best match: networkx 2.6.3
Adding networkx 2.6.3 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for scipy==1.7.3
Best match: scipy 1.7.3
Adding scipy 1.7.3 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for numpy==1.21.6
Best match: numpy 1.21.6
Adding numpy 1.21.6 to easy-install.pth file
Installing f2py script to /opt/conda/envs/pytorch-ci/bin
Installing f2py3 script to /opt/conda/envs/pytorch-ci/bin
Installing f2py3.7 script to /opt/conda/envs/pytorch-ci/bin
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for certifi==2022.6.15
Best match: certifi 2022.6.15
Adding certifi 2022.6.15 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for urllib3==1.26.11
Best match: urllib3 1.26.11
Adding urllib3 1.26.11 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for idna==3.3
Best match: idna 3.3
Adding idna 3.3 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for charset-normalizer==2.1.0
Best match: charset-normalizer 2.1.0
Adding charset-normalizer 2.1.0 to easy-install.pth file
Installing normalizer script to /opt/conda/envs/pytorch-ci/bin
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Finished processing dependencies for dgl==0.9
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /root/jenkins/workspace/dgl_PR-4648/python/dgl/_ffi/_cython/core.pyx
  tree = Parsing.p_module(s, pxd, full_module_name)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/command/install.py:37: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.
  setuptools.SetuptoolsDeprecationWarning,
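The FutureWarning above fires because core.pyx is cythonized without an explicit language_level directive, so Cython falls back to Python 2 semantics. A sketch of one standard fix (an assumption here, not taken from DGL's build scripts):

    from Cython.Build import cythonize

    # Pinning language_level=3 silences the "using 2 for now (Py2)" warning.
    ext_modules = cythonize(
        "dgl/_ffi/_cython/core.pyx",
        language_level=3,
    )

Equivalently, a `# cython: language_level=3` comment at the top of core.pyx sets the same directive per file.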
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/command/easy_install.py:147: EasyInstallDeprecationWarning: easy_install command is deprecated. Use build and pip and other standards-based tools.
  EasyInstallDeprecationWarning,
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: is an invalid version and will not be supported in a future release
  PkgResourcesDeprecationWarning,
[1/1] Cythonizing dgl/_ffi/_cython/core.pyx
running build_ext
building 'dgl._ffi._cy3.core' extension
gcc -pthread -B /opt/conda/envs/pytorch-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/pytorch-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o
cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++
g++ -pthread -B /opt/conda/envs/pytorch-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/pytorch-ci/compiler_compat -L/opt/conda/envs/pytorch-ci/lib -Wl,-rpath=/opt/conda/envs/pytorch-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so -> dgl/_ffi/_cy3
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /root/jenkins/workspace/dgl_PR-4648/python/dgl/_ffi/_cython/core.pyx
  tree = Parsing.p_module(s, pxd, full_module_name)
WARNING: Skipping dgl as it is not installed.
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
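The gcc/g++ pair above compiles and links a single C++ extension module. An Extension declaration consistent with those command lines would look roughly like the sketch below; it is inferred from the -I/-L/-l flags in the log, not copied from DGL's setup.py:

    from setuptools import Extension

    core_ext = Extension(
        "dgl._ffi._cy3.core",
        sources=["dgl/_ffi/_cython/core.cpp"],  # output of the Cythonizing step
        include_dirs=[
            "../include/",
            "../third_party/dmlc-core/include",
            "../third_party/dlpack/include",
        ],
        library_dirs=["../build/Release", "../build"],  # -L../build/Release -L../build
        libraries=["dgl"],                              # links against libdgl.so (-ldgl)
        language="c++",
    )

The cc1plus warning about -Wstrict-prototypes is benign: distutils reuses its C compiler flags for C++ sources, and that option only applies to C.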
[1/1] Cythonizing dgl/_ffi/_cython/core.pyx
running install
running bdist_egg
running egg_info
creating dgl.egg-info
writing dgl.egg-info/PKG-INFO
writing dependency_links to dgl.egg-info/dependency_links.txt
writing requirements to dgl.egg-info/requires.txt
writing top-level names to dgl.egg-info/top_level.txt
writing manifest file 'dgl.egg-info/SOURCES.txt'
reading manifest file 'dgl.egg-info/SOURCES.txt'
writing manifest file 'dgl.egg-info/SOURCES.txt'
installing library code to build/bdist.linux-x86_64/egg
running install_lib
running build_py
creating build
creating build/lib.linux-x86_64-3.7
creating build/lib.linux-x86_64-3.7/dgl
copying dgl/partition.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/core.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/subgraph.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/traversal.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/base.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/heterograph_index.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/sparse.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/global_config.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/_api_internal.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/heterograph.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/network.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/logging.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/graph_index.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/udf.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/ndarray.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/init.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/view.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/generators.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/convert.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/container.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/merge.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/readout.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/__init__.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/propagate.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/batch.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/frame.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/random.py -> build/lib.linux-x86_64-3.7/dgl
creating build/lib.linux-x86_64-3.7/dgl/mock_sparse
copying dgl/mock_sparse/sp_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse
copying dgl/mock_sparse/diag_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse
copying dgl/mock_sparse/reduction.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse
copying dgl/mock_sparse/__init__.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse
copying dgl/mock_sparse/elementwise_op_sp.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse
creating build/lib.linux-x86_64-3.7/dgl/geometry
copying dgl/geometry/edge_coarsening.py -> build/lib.linux-x86_64-3.7/dgl/geometry
copying dgl/geometry/fps.py -> build/lib.linux-x86_64-3.7/dgl/geometry
copying dgl/geometry/capi.py -> build/lib.linux-x86_64-3.7/dgl/geometry
copying dgl/geometry/__init__.py -> build/lib.linux-x86_64-3.7/dgl/geometry
creating build/lib.linux-x86_64-3.7/dgl/function
copying dgl/function/base.py -> build/lib.linux-x86_64-3.7/dgl/function
copying dgl/function/message.py -> build/lib.linux-x86_64-3.7/dgl/function
copying dgl/function/reducer.py -> build/lib.linux-x86_64-3.7/dgl/function
copying dgl/function/__init__.py -> build/lib.linux-x86_64-3.7/dgl/function
creating build/lib.linux-x86_64-3.7/dgl/nn
copying dgl/nn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn
creating build/lib.linux-x86_64-3.7/dgl/contrib
copying dgl/contrib/dis_kvstore.py -> build/lib.linux-x86_64-3.7/dgl/contrib
copying dgl/contrib/unified_tensor.py -> build/lib.linux-x86_64-3.7/dgl/contrib
copying dgl/contrib/graph_store.py -> build/lib.linux-x86_64-3.7/dgl/contrib
copying dgl/contrib/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib
creating build/lib.linux-x86_64-3.7/dgl/utils
copying dgl/utils/checks.py -> build/lib.linux-x86_64-3.7/dgl/utils
copying dgl/utils/exception.py -> build/lib.linux-x86_64-3.7/dgl/utils
copying dgl/utils/pin_memory.py -> build/lib.linux-x86_64-3.7/dgl/utils
copying dgl/utils/internal.py -> build/lib.linux-x86_64-3.7/dgl/utils
copying dgl/utils/filter.py -> build/lib.linux-x86_64-3.7/dgl/utils
copying dgl/utils/data.py -> build/lib.linux-x86_64-3.7/dgl/utils
copying dgl/utils/__init__.py -> build/lib.linux-x86_64-3.7/dgl/utils
copying dgl/utils/shared_mem.py -> build/lib.linux-x86_64-3.7/dgl/utils
creating build/lib.linux-x86_64-3.7/dgl/multiprocessing
copying dgl/multiprocessing/pytorch.py -> build/lib.linux-x86_64-3.7/dgl/multiprocessing
copying dgl/multiprocessing/__init__.py -> build/lib.linux-x86_64-3.7/dgl/multiprocessing
creating build/lib.linux-x86_64-3.7/dgl/optim
copying dgl/optim/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim
creating build/lib.linux-x86_64-3.7/dgl/dataloading
copying dgl/dataloading/base.py -> build/lib.linux-x86_64-3.7/dgl/dataloading
copying dgl/dataloading/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/dataloading
copying dgl/dataloading/graphsaint.py -> build/lib.linux-x86_64-3.7/dgl/dataloading
copying dgl/dataloading/shadow.py -> build/lib.linux-x86_64-3.7/dgl/dataloading
copying dgl/dataloading/negative_sampler.py -> build/lib.linux-x86_64-3.7/dgl/dataloading
copying dgl/dataloading/__init__.py -> build/lib.linux-x86_64-3.7/dgl/dataloading
copying dgl/dataloading/cluster_gcn.py -> build/lib.linux-x86_64-3.7/dgl/dataloading
copying dgl/dataloading/neighbor_sampler.py -> build/lib.linux-x86_64-3.7/dgl/dataloading
copying dgl/dataloading/dist_dataloader.py -> build/lib.linux-x86_64-3.7/dgl/dataloading
creating build/lib.linux-x86_64-3.7/dgl/_dataloading
copying dgl/_dataloading/neighbor.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading
copying dgl/_dataloading/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading
copying dgl/_dataloading/shadow.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading
copying dgl/_dataloading/negative_sampler.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading
copying dgl/_dataloading/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading
copying dgl/_dataloading/cluster_gcn.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading
creating build/lib.linux-x86_64-3.7/dgl/backend
copying dgl/backend/backend.py -> build/lib.linux-x86_64-3.7/dgl/backend
copying dgl/backend/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend
copying dgl/backend/set_default_backend.py -> build/lib.linux-x86_64-3.7/dgl/backend
creating build/lib.linux-x86_64-3.7/dgl/ops
copying dgl/ops/edge_softmax.py -> build/lib.linux-x86_64-3.7/dgl/ops
copying dgl/ops/sddmm.py -> build/lib.linux-x86_64-3.7/dgl/ops
copying dgl/ops/gather_mm.py -> build/lib.linux-x86_64-3.7/dgl/ops
copying dgl/ops/segment.py -> build/lib.linux-x86_64-3.7/dgl/ops
copying dgl/ops/__init__.py -> build/lib.linux-x86_64-3.7/dgl/ops
copying dgl/ops/spmm.py -> build/lib.linux-x86_64-3.7/dgl/ops
creating build/lib.linux-x86_64-3.7/dgl/cuda
copying dgl/cuda/nccl.py -> build/lib.linux-x86_64-3.7/dgl/cuda
copying dgl/cuda/__init__.py -> build/lib.linux-x86_64-3.7/dgl/cuda
creating build/lib.linux-x86_64-3.7/dgl/storages
copying dgl/storages/pytorch_tensor.py -> build/lib.linux-x86_64-3.7/dgl/storages
copying dgl/storages/base.py -> build/lib.linux-x86_64-3.7/dgl/storages
copying dgl/storages/tensor.py -> build/lib.linux-x86_64-3.7/dgl/storages
copying dgl/storages/__init__.py -> build/lib.linux-x86_64-3.7/dgl/storages
copying dgl/storages/numpy.py -> build/lib.linux-x86_64-3.7/dgl/storages
creating build/lib.linux-x86_64-3.7/dgl/sampling
copying dgl/sampling/neighbor.py -> build/lib.linux-x86_64-3.7/dgl/sampling
copying dgl/sampling/pinsage.py -> build/lib.linux-x86_64-3.7/dgl/sampling
copying dgl/sampling/negative.py -> build/lib.linux-x86_64-3.7/dgl/sampling
copying dgl/sampling/randomwalks.py -> build/lib.linux-x86_64-3.7/dgl/sampling
copying dgl/sampling/utils.py -> build/lib.linux-x86_64-3.7/dgl/sampling
copying dgl/sampling/node2vec_randomwalk.py -> build/lib.linux-x86_64-3.7/dgl/sampling
copying dgl/sampling/__init__.py -> build/lib.linux-x86_64-3.7/dgl/sampling
creating build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/qm7b.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/graph_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/dgl_dataset.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/csv_dataset_base.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/knowledge_graph.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/qm9_edge.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/gnn_benchmark.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/qm9.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/icews18.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/gdelt.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/utils.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/sbm.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/csv_dataset.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/karate.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/citation_graph.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/rdf.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/bitcoinotc.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/ppi.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/tensor_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/flickr.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/wikics.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/heterograph_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/tu.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/__init__.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/gindt.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/tree.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/synthetic.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/adapter.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/fakenews.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/fraud.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/minigc.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/yelp.py -> build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/reddit.py -> build/lib.linux-x86_64-3.7/dgl/data
creating build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/dist_graph.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/graph_partition_book.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/partition.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/standalone_kvstore.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/graph_services.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/role.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/constants.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/rpc_server.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/kvstore.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/dist_context.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/rpc.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/rpc_client.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/id_map.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/server_state.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/shared_mem_utils.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/dist_tensor.py -> build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/dist_dataloader.py -> build/lib.linux-x86_64-3.7/dgl/distributed
creating build/lib.linux-x86_64-3.7/dgl/_deprecate
copying dgl/_deprecate/nodeflow.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate
copying dgl/_deprecate/udf.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate
copying dgl/_deprecate/view.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate
copying dgl/_deprecate/kernel.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate
copying dgl/_deprecate/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate
copying dgl/_deprecate/graph.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate
copying dgl/_deprecate/frame.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate
creating build/lib.linux-x86_64-3.7/dgl/transforms
copying dgl/transforms/functional.py -> build/lib.linux-x86_64-3.7/dgl/transforms
copying dgl/transforms/__init__.py -> build/lib.linux-x86_64-3.7/dgl/transforms
copying dgl/transforms/module.py -> build/lib.linux-x86_64-3.7/dgl/transforms
creating build/lib.linux-x86_64-3.7/dgl/distgnn
copying dgl/distgnn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn
creating build/lib.linux-x86_64-3.7/dgl/_ffi
copying dgl/_ffi/runtime_ctypes.py -> build/lib.linux-x86_64-3.7/dgl/_ffi
copying dgl/_ffi/function.py -> build/lib.linux-x86_64-3.7/dgl/_ffi
copying dgl/_ffi/libinfo.py -> build/lib.linux-x86_64-3.7/dgl/_ffi
copying dgl/_ffi/streams.py -> build/lib.linux-x86_64-3.7/dgl/_ffi
copying dgl/_ffi/base.py -> build/lib.linux-x86_64-3.7/dgl/_ffi
copying dgl/_ffi/ndarray.py -> build/lib.linux-x86_64-3.7/dgl/_ffi
copying dgl/_ffi/object_generic.py -> build/lib.linux-x86_64-3.7/dgl/_ffi
copying dgl/_ffi/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi
copying dgl/_ffi/object.py -> build/lib.linux-x86_64-3.7/dgl/_ffi
creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch
copying dgl/nn/pytorch/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch
copying dgl/nn/pytorch/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch
copying dgl/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch
copying dgl/nn/pytorch/linear.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch
copying dgl/nn/pytorch/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch
copying dgl/nn/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch
copying dgl/nn/pytorch/factory.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch
copying dgl/nn/pytorch/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch
creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet
copying dgl/nn/mxnet/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet
copying dgl/nn/mxnet/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet
copying dgl/nn/mxnet/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet
copying dgl/nn/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet
copying dgl/nn/mxnet/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet
creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow
copying dgl/nn/tensorflow/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow
copying dgl/nn/tensorflow/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow
copying dgl/nn/tensorflow/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow
copying dgl/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow
copying dgl/nn/tensorflow/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/nn/functional
copying dgl/nn/functional/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/functional
creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link
copying dgl/nn/pytorch/link/transe.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link
copying dgl/nn/pytorch/link/edgepred.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link
copying dgl/nn/pytorch/link/transr.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link
copying dgl/nn/pytorch/link/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link
creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/agnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/dgnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/grouprevres.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/twirlsconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/egatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/gcn2conv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/dotgatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/cfconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/gmmconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/densesageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/gineconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/nnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/gatv2conv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/hgtconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/pnaconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/gatedgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/atomicconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/tagconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/densegraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/egnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain
copying dgl/nn/pytorch/explain/gnnexplainer.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain
copying dgl/nn/pytorch/explain/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain
creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/agnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/gmmconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/densesageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/nnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/gatedgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/tagconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/densegraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
creating build/lib.linux-x86_64-3.7/dgl/contrib/sampling
copying dgl/contrib/sampling/sampler.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling
copying dgl/contrib/sampling/dis_sampler.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling
copying dgl/contrib/sampling/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling
creating build/lib.linux-x86_64-3.7/dgl/contrib/data
copying dgl/contrib/data/knowledge_graph.py -> build/lib.linux-x86_64-3.7/dgl/contrib/data
copying dgl/contrib/data/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib/data
creating build/lib.linux-x86_64-3.7/dgl/optim/pytorch
copying dgl/optim/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/pytorch
copying dgl/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/optim/pytorch
creating build/lib.linux-x86_64-3.7/dgl/optim/mxnet
copying dgl/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/mxnet
creating build/lib.linux-x86_64-3.7/dgl/optim/tensorflow
copying dgl/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch
copying dgl/_dataloading/pytorch/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch
copying dgl/_dataloading/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch
creating build/lib.linux-x86_64-3.7/dgl/backend/pytorch
copying dgl/backend/pytorch/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch
copying dgl/backend/pytorch/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch
copying dgl/backend/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch
creating build/lib.linux-x86_64-3.7/dgl/backend/mxnet
copying dgl/backend/mxnet/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet
copying dgl/backend/mxnet/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet
copying dgl/backend/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet
copying dgl/backend/mxnet/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet
creating build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
copying dgl/backend/tensorflow/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
copying dgl/backend/tensorflow/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
copying dgl/backend/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
copying dgl/backend/tensorflow/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn
copying dgl/distributed/nn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim
copying dgl/distributed/optim/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch
copying dgl/distributed/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch
copying dgl/distributed/nn/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet
copying dgl/distributed/nn/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow
copying dgl/distributed/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch
copying dgl/distributed/optim/pytorch/utils.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch
copying dgl/distributed/optim/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch
copying dgl/distributed/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet
copying dgl/distributed/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow
copying dgl/distributed/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/spmv.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/degree_bucketing.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/scheduler.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/adapter.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/runtime.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
copying dgl/_deprecate/runtime/ir/program.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
copying dgl/_deprecate/runtime/ir/executor.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
copying dgl/_deprecate/runtime/ir/var.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
copying dgl/_deprecate/runtime/ir/registry.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
copying dgl/_deprecate/runtime/ir/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
creating build/lib.linux-x86_64-3.7/dgl/distgnn/partition
copying dgl/distgnn/partition/libra_partition.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/partition
copying dgl/distgnn/partition/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/partition
creating build/lib.linux-x86_64-3.7/dgl/distgnn/tools
copying dgl/distgnn/tools/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/tools
copying dgl/distgnn/tools/tools.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/tools
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
copying dgl/_ffi/_ctypes/function.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
copying dgl/_ffi/_ctypes/types.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
copying dgl/_ffi/_ctypes/ndarray.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
copying dgl/_ffi/_ctypes/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
copying dgl/_ffi/_ctypes/object.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3
copying dgl/_ffi/_cy3/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2
copying dgl/_ffi/_cy2/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cython
copying dgl/_ffi/_cython/core.cpp -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cython
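The long copying run above is the build_py stage materializing every discovered dgl subpackage under build/lib. A sketch of the package discovery that yields a list matching these directories (hedged — the actual setup.py may enumerate packages differently):

    from setuptools import find_packages

    # Run from the python/ source tree, this returns 'dgl', 'dgl.nn',
    # 'dgl.nn.pytorch.conv', 'dgl.distributed.optim.pytorch', ...,
    # mirroring the creating/copying lines above.
    print(find_packages(include=["dgl", "dgl.*"]))

Non-Python files such as dgl/_ffi/_cython/core.cpp ride along separately (e.g. via package_data), which is why the .cpp appears at the end of the copy list.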
dgl/_ffi/_cython/core.cpp -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cython running build_ext building 'dgl._ffi._cy3.core' extension creating build/temp.linux-x86_64-3.7 creating build/temp.linux-x86_64-3.7/dgl creating build/temp.linux-x86_64-3.7/dgl/_ffi creating build/temp.linux-x86_64-3.7/dgl/_ffi/_cython gcc -pthread -B /opt/conda/envs/mxnet-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/mxnet-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++ g++ -pthread -B /opt/conda/envs/mxnet-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/mxnet-ci/compiler_compat -L/opt/conda/envs/mxnet-ci/lib -Wl,-rpath=/opt/conda/envs/mxnet-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 creating build/bdist.linux-x86_64 creating build/bdist.linux-x86_64/egg creating build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/sp_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/diag_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/reduction.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/__init__.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/elementwise_op_sp.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse creating build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-3.7/dgl/geometry/edge_coarsening.py -> build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-3.7/dgl/geometry/fps.py -> build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-3.7/dgl/geometry/capi.py -> build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-3.7/dgl/geometry/__init__.py -> build/bdist.linux-x86_64/egg/dgl/geometry creating build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/function/base.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/function/message.py -> build/bdist.linux-x86_64/egg/dgl/function copying 
creating build/bdist.linux-x86_64
creating build/bdist.linux-x86_64/egg
creating build/bdist.linux-x86_64/egg/dgl
[... long run of bdist_egg staging output elided: every module built above is copied again from build/lib.linux-x86_64-3.7/dgl into build/bdist.linux-x86_64/egg/dgl (mock_sparse, geometry, function, nn, contrib, utils, multiprocessing, optim, dataloading, _dataloading, backend, ops, cuda, storages, sampling, data, distributed, _deprecate, transforms, distgnn, _ffi, and the top-level .py modules), including the compiled extension core.cpython-37m-x86_64-linux-gnu.so ...]
build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/random.py -> build/bdist.linux-x86_64/egg/dgl byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/sp_matrix.py to sp_matrix.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/diag_matrix.py to diag_matrix.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/reduction.py to reduction.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/elementwise_op_sp.py to elementwise_op_sp.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/edge_coarsening.py to edge_coarsening.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/fps.py to fps.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/capi.py to capi.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/message.py to message.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/reducer.py to reducer.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/partition.py to partition.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/glob.py to glob.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transe.py to transe.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/edgepred.py to edgepred.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transr.py to transr.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/hetero.py to hetero.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/sparse_emb.py to sparse_emb.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/linear.py to linear.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/agnnconv.py to agnnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/relgraphconv.py to relgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/appnpconv.py to appnpconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dgnconv.py to dgnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/grouprevres.py to grouprevres.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/twirlsconv.py to twirlsconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egatconv.py to egatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/graphconv.py to graphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densechebconv.py to densechebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gcn2conv.py to gcn2conv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/ginconv.py to ginconv.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dotgatconv.py to dotgatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/cfconv.py to cfconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gmmconv.py to gmmconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sageconv.py to sageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densesageconv.py to densesageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gineconv.py to gineconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/edgeconv.py to edgeconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/nnconv.py to nnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatv2conv.py to gatv2conv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/hgtconv.py to hgtconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/pnaconv.py to pnaconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatedgraphconv.py to gatedgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/atomicconv.py to atomicconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/tagconv.py to tagconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densegraphconv.py to densegraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egnnconv.py to egnnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/chebconv.py to chebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sgconv.py to sgconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatconv.py to gatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/gnnexplainer.py to gnnexplainer.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/factory.py to factory.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/softmax.py to softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/glob.py to glob.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/hetero.py to hetero.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/agnnconv.py to agnnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/relgraphconv.py to relgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/appnpconv.py to appnpconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/graphconv.py to graphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densechebconv.py to densechebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/ginconv.py to ginconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gmmconv.py to 
gmmconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sageconv.py to sageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densesageconv.py to densesageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/edgeconv.py to edgeconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/nnconv.py to nnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatedgraphconv.py to gatedgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/tagconv.py to tagconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densegraphconv.py to densegraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/chebconv.py to chebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sgconv.py to sgconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatconv.py to gatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/softmax.py to softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/glob.py to glob.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/hetero.py to hetero.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/relgraphconv.py to relgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/appnpconv.py to appnpconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/graphconv.py to graphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/densechebconv.py to densechebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/ginconv.py to ginconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sageconv.py to sageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/edgeconv.py to edgeconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/chebconv.py to chebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sgconv.py to sgconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/gatconv.py to gatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/softmax.py to softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/functional/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/core.py to core.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/subgraph.py to subgraph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/dis_kvstore.py to dis_kvstore.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/unified_tensor.py to 
unified_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/graph_store.py to graph_store.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/sampler.py to sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/dis_sampler.py to dis_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/traversal.py to traversal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/checks.py to checks.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/exception.py to exception.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/pin_memory.py to pin_memory.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/internal.py to internal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/filter.py to filter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/data.py to data.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/shared_mem.py to shared_mem.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/pytorch.py to pytorch.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph_index.py to heterograph_index.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/global_config.py to global_config.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/graphsaint.py to graphsaint.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/shadow.py to shadow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/neighbor_sampler.py to neighbor_sampler.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/dataloading/dist_dataloader.py to dist_dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/neighbor.py to neighbor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/shadow.py to shadow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_api_internal.py to _api_internal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph.py to heterograph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/backend.py to backend.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/set_default_backend.py to set_default_backend.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/network.py to network.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/edge_softmax.py to edge_softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/sddmm.py to sddmm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/gather_mm.py to gather_mm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/segment.py to segment.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/spmm.py to spmm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/logging.py to logging.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/graph_index.py to graph_index.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/nccl.py to nccl.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/__init__.py to __init__.cpython-37.pyc 
byte-compiling build/bdist.linux-x86_64/egg/dgl/udf.py to udf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/pytorch_tensor.py to pytorch_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/numpy.py to numpy.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/init.py to init.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/view.py to view.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/neighbor.py to neighbor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/pinsage.py to pinsage.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/negative.py to negative.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/randomwalks.py to randomwalks.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/node2vec_randomwalk.py to node2vec_randomwalk.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/generators.py to generators.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm7b.py to qm7b.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/graph_serialize.py to graph_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/dgl_dataset.py to dgl_dataset.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset_base.py to csv_dataset_base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9_edge.py to qm9_edge.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gnn_benchmark.py to gnn_benchmark.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9.py to qm9.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/icews18.py to icews18.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gdelt.py to gdelt.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/sbm.py to sbm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset.py to csv_dataset.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/karate.py to karate.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/citation_graph.py to citation_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/rdf.py to rdf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/bitcoinotc.py to bitcoinotc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/ppi.py to ppi.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tensor_serialize.py to tensor_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/flickr.py to flickr.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/wikics.py to wikics.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/data/heterograph_serialize.py to heterograph_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tu.py to tu.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gindt.py to gindt.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tree.py to tree.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/synthetic.py to synthetic.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/adapter.py to adapter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fakenews.py to fakenews.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fraud.py to fraud.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/minigc.py to minigc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/yelp.py to yelp.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/reddit.py to reddit.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/convert.py to convert.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/container.py to container.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/merge.py to merge.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/readout.py to readout.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_graph.py to dist_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_partition_book.py to graph_partition_book.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/partition.py to partition.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/sparse_emb.py to sparse_emb.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/standalone_kvstore.py to standalone_kvstore.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_services.py to graph_services.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/role.py to role.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/constants.py to constants.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_server.py to 
rpc_server.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/kvstore.py to kvstore.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_context.py to dist_context.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc.py to rpc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_client.py to rpc_client.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/id_map.py to id_map.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/server_state.py to server_state.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/shared_mem_utils.py to shared_mem_utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_tensor.py to dist_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_dataloader.py to dist_dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/nodeflow.py to nodeflow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/udf.py to udf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/view.py to view.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/kernel.py to kernel.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/spmv.py to spmv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/degree_bucketing.py to degree_bucketing.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/scheduler.py to scheduler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/adapter.py to adapter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/runtime.py to runtime.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/program.py to program.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/executor.py to executor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/var.py to var.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/registry.py to registry.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/graph.py to graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/frame.py to frame.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/propagate.py to propagate.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/batch.py to batch.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/functional.py to functional.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/module.py to module.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/libra_partition.py to libra_partition.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/__init__.py to 
__init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/tools.py to tools.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/runtime_ctypes.py to runtime_ctypes.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/function.py to function.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/libinfo.py to libinfo.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/streams.py to streams.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/function.py to function.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/types.py to types.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/object.py to object.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object_generic.py to object_generic.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object.py to object.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/frame.py to frame.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/random.py to random.cpython-37.pyc creating stub loader for dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/core.py to core.cpython-37.pyc installing package data to build/bdist.linux-x86_64/egg running install_data copying ../build/libdgl.so -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/tensoradapter creating build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch copying ../build/tensoradapter/pytorch/libtensoradapter_pytorch_1.9.0.so -> build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch creating build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/not-zip-safe -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO writing build/bdist.linux-x86_64/egg/EGG-INFO/native_libs.txt creating dist creating 'dist/dgl-0.9-py3.7-linux-x86_64.egg' and adding 'build/bdist.linux-x86_64/egg' to it removing 'build/bdist.linux-x86_64/egg' (and everything under it) Processing dgl-0.9-py3.7-linux-x86_64.egg creating /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg Extracting dgl-0.9-py3.7-linux-x86_64.egg to /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages Adding dgl 0.9 to 
easy-install.pth file Installed /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg Processing dependencies for dgl==0.9 Searching for psutil>=5.8.0 Reading https://pypi.org/simple/psutil/ Downloading https://files.pythonhosted.org/packages/3d/73/d8c87b5612c58d1e6c6d91997c1590771d34e4ee27d9c11eb1e64ecbf365/psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=4fb54941aac044a61db9d8eb56fc5bee207db3bc58645d657249030e15ba3727 Best match: psutil 5.9.2 Processing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl Installing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl to /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages Adding psutil 5.9.2 to easy-install.pth file Installed /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/psutil-5.9.2-py3.7-linux-x86_64.egg Searching for tqdm==4.64.0 Best match: tqdm 4.64.0 Adding tqdm 4.64.0 to easy-install.pth file Installing tqdm script to /opt/conda/envs/mxnet-ci/bin Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages Searching for requests==2.28.1 Best match: requests 2.28.1 Adding requests 2.28.1 to easy-install.pth file Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages Searching for networkx==2.6.3 Best match: networkx 2.6.3 Adding networkx 2.6.3 to easy-install.pth file Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages Searching for scipy==1.7.3 Best match: scipy 1.7.3 Adding scipy 1.7.3 to easy-install.pth file Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages Searching for numpy==1.21.6 Best match: numpy 1.21.6 Adding numpy 1.21.6 to easy-install.pth file Installing f2py script to /opt/conda/envs/mxnet-ci/bin Installing f2py3 script to /opt/conda/envs/mxnet-ci/bin Installing f2py3.7 script to /opt/conda/envs/mxnet-ci/bin Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages Searching for certifi==2022.6.15 Best match: certifi 2022.6.15 Adding certifi 2022.6.15 to easy-install.pth file Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages Searching for urllib3==1.26.11 Best match: urllib3 1.26.11 Adding urllib3 1.26.11 to easy-install.pth file Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages Searching for idna==3.3 Best match: idna 3.3 Adding idna 3.3 to easy-install.pth file Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages Searching for charset-normalizer==2.1.0 Best match: charset-normalizer 2.1.0 Adding charset-normalizer 2.1.0 to easy-install.pth file Installing normalizer script to /opt/conda/envs/mxnet-ci/bin Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages Finished processing dependencies for dgl==0.9 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /root/jenkins/workspace/dgl_PR-4648/python/dgl/_ffi/_cython/core.pyx tree = Parsing.p_module(s, pxd, full_module_name) /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/setuptools/command/install.py:37: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools. setuptools.SetuptoolsDeprecationWarning, /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/setuptools/command/easy_install.py:147: EasyInstallDeprecationWarning: easy_install command is deprecated. 
Use build and pip and other standards-based tools. EasyInstallDeprecationWarning, /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, [1/1] Cythonizing dgl/_ffi/_cython/core.pyx running build_ext building 'dgl._ffi._cy3.core' extension gcc -pthread -B /opt/conda/envs/mxnet-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/mxnet-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++ g++ -pthread -B /opt/conda/envs/mxnet-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/mxnet-ci/compiler_compat -L/opt/conda/envs/mxnet-ci/lib -Wl,-rpath=/opt/conda/envs/mxnet-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so -> dgl/_ffi/_cy3 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /root/jenkins/workspace/dgl_PR-4648/python/dgl/_ffi/_cython/core.pyx tree = Parsing.p_module(s, pxd, full_module_name) WARNING: Skipping dgl as it is not installed. WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv [1/1] Cythonizing dgl/_ffi/_cython/core.pyx running install running bdist_egg running egg_info creating dgl.egg-info writing dgl.egg-info/PKG-INFO writing dependency_links to dgl.egg-info/dependency_links.txt writing requirements to dgl.egg-info/requires.txt writing top-level names to dgl.egg-info/top_level.txt writing manifest file 'dgl.egg-info/SOURCES.txt' reading manifest file 'dgl.egg-info/SOURCES.txt' writing manifest file 'dgl.egg-info/SOURCES.txt' installing library code to build/bdist.linux-x86_64/egg running install_lib running build_py creating build creating build/lib.linux-x86_64-3.7 creating build/lib.linux-x86_64-3.7/dgl copying dgl/partition.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/core.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/subgraph.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/traversal.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/base.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/heterograph_index.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/sparse.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/global_config.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/_api_internal.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/heterograph.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/network.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/logging.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/graph_index.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/udf.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/ndarray.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/init.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/view.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/generators.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/convert.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/container.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/merge.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/readout.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/__init__.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/propagate.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/batch.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/frame.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/random.py -> build/lib.linux-x86_64-3.7/dgl creating build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/sp_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/diag_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/reduction.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/__init__.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/elementwise_op_sp.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse creating build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/edge_coarsening.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/fps.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/capi.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/__init__.py -> build/lib.linux-x86_64-3.7/dgl/geometry creating build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/base.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/message.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/reducer.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/__init__.py -> build/lib.linux-x86_64-3.7/dgl/function creating build/lib.linux-x86_64-3.7/dgl/nn copying dgl/nn/__init__.py -> 
build/lib.linux-x86_64-3.7/dgl/nn creating build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/dis_kvstore.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/unified_tensor.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/graph_store.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib creating build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/checks.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/exception.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/pin_memory.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/internal.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/filter.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/data.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/__init__.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/shared_mem.py -> build/lib.linux-x86_64-3.7/dgl/utils creating build/lib.linux-x86_64-3.7/dgl/multiprocessing copying dgl/multiprocessing/pytorch.py -> build/lib.linux-x86_64-3.7/dgl/multiprocessing copying dgl/multiprocessing/__init__.py -> build/lib.linux-x86_64-3.7/dgl/multiprocessing creating build/lib.linux-x86_64-3.7/dgl/optim copying dgl/optim/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim creating build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/base.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/graphsaint.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/shadow.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/negative_sampler.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/__init__.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/cluster_gcn.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/neighbor_sampler.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/dist_dataloader.py -> build/lib.linux-x86_64-3.7/dgl/dataloading creating build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/neighbor.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/shadow.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/negative_sampler.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/cluster_gcn.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading creating build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/backend.py -> build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/set_default_backend.py -> build/lib.linux-x86_64-3.7/dgl/backend creating build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/edge_softmax.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/sddmm.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/gather_mm.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/segment.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/__init__.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/spmm.py -> build/lib.linux-x86_64-3.7/dgl/ops creating build/lib.linux-x86_64-3.7/dgl/cuda copying dgl/cuda/nccl.py -> build/lib.linux-x86_64-3.7/dgl/cuda 
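A note on the "creating stub loader for dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so" step above: when bdist_egg packages a compiled extension, it drops a small pure-Python module next to it inside the egg, and that stub locates and loads the real shared object on first import. The sketch below paraphrases the stub template setuptools writes (exact contents vary by setuptools version; only the shared-object filename is taken from the log):

    def __bootstrap__():
        # Resolve the real compiled extension inside the installed egg
        # and load it in place of this stub module.
        global __bootstrap__, __loader__, __file__
        import sys, pkg_resources, imp
        __file__ = pkg_resources.resource_filename(
            __name__, 'core.cpython-37m-x86_64-linux-gnu.so')
        __loader__ = None
        del __bootstrap__, __loader__
        imp.load_dynamic(__name__, __file__)
    __bootstrap__()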
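The "Processing dependencies for dgl==0.9" section above is easy_install walking the requires.txt that was written into dgl.egg-info, which in turn comes from install_requires in setup.py. An illustrative reconstruction follows; only the psutil specifier ("Searching for psutil>=5.8.0") is visible in the log, so the other version specifiers are left open as assumptions:

    # Illustrative only: the install_requires that drives the dependency
    # resolution in the log. psutil>=5.8.0 is taken from the log; the
    # remaining entries name the packages that were resolved, with
    # specifiers omitted because the log does not show them.
    from setuptools import setup, find_packages

    setup(
        name="dgl",
        version="0.9",
        packages=find_packages(),
        install_requires=[
            "psutil>=5.8.0",
            "numpy",
            "scipy",
            "networkx",
            "requests",
            "tqdm",
        ],
    )

The certifi, urllib3, idna, and charset-normalizer entries that also appear in the log are pulled in transitively by requests rather than listed by dgl itself.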
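The FutureWarning emitted while Cythonizing dgl/_ffi/_cython/core.pyx above ("Cython directive 'language_level' not set, using 2 for now") goes away once the directive is pinned explicitly. A minimal sketch of the conventional fix, assuming a setuptools build; the module name and source path come from the log, everything else (include dirs, libraries) is omitted:

    from setuptools import Extension, setup
    from Cython.Build import cythonize

    ext = Extension("dgl._ffi._cy3.core",
                    sources=["dgl/_ffi/_cython/core.pyx"])

    setup(
        ext_modules=cythonize(
            [ext],
            # Pinning language_level (2 or 3) silences the FutureWarning
            # seen in the log.
            compiler_directives={"language_level": 3},
        ),
    )

The same effect can be had per file with a "# cython: language_level=3" comment at the top of core.pyx.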
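The pip warning above about running as root and the pointer to https://pip.pypa.io/warnings/venv are advisory; in a throwaway CI container they are usually ignored. For completeness, the stdlib way to create the virtual environment pip suggests (the target path here is hypothetical):

    import venv
    # Create an isolated environment with its own pip, as the pip
    # warning recommends; the directory is illustrative only.
    venv.create("/tmp/dgl-ci-venv", with_pip=True)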
copying dgl/cuda/__init__.py -> build/lib.linux-x86_64-3.7/dgl/cuda creating build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/pytorch_tensor.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/base.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/tensor.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/__init__.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/numpy.py -> build/lib.linux-x86_64-3.7/dgl/storages creating build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/neighbor.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/pinsage.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/negative.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/randomwalks.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/utils.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/node2vec_randomwalk.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/__init__.py -> build/lib.linux-x86_64-3.7/dgl/sampling creating build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm7b.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/graph_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/dgl_dataset.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/csv_dataset_base.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/knowledge_graph.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm9_edge.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gnn_benchmark.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm9.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/icews18.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gdelt.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/utils.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/sbm.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/csv_dataset.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/karate.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/citation_graph.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/rdf.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/bitcoinotc.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/ppi.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tensor_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/flickr.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/wikics.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/heterograph_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tu.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/__init__.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gindt.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tree.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/synthetic.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/adapter.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/fakenews.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/fraud.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/minigc.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/yelp.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/reddit.py -> build/lib.linux-x86_64-3.7/dgl/data creating build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_graph.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying 
dgl/distributed/graph_partition_book.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/partition.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/standalone_kvstore.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/graph_services.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/role.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/constants.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc_server.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/kvstore.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_context.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc_client.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/id_map.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/server_state.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/shared_mem_utils.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_tensor.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_dataloader.py -> build/lib.linux-x86_64-3.7/dgl/distributed creating build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/nodeflow.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/udf.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/view.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/kernel.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/graph.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/frame.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate creating build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/functional.py -> build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/__init__.py -> build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/module.py -> build/lib.linux-x86_64-3.7/dgl/transforms creating build/lib.linux-x86_64-3.7/dgl/distgnn copying dgl/distgnn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn creating build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/runtime_ctypes.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/function.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/libinfo.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/streams.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/base.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/ndarray.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/object_generic.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/object.py -> build/lib.linux-x86_64-3.7/dgl/_ffi creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/linear.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying 
dgl/nn/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/factory.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow creating build/lib.linux-x86_64-3.7/dgl/nn/functional copying dgl/nn/functional/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/functional creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/transe.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/edgepred.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/transr.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/agnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/dgnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/grouprevres.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/twirlsconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/egatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gcn2conv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/dotgatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/cfconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gmmconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densesageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gineconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/nnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying 
dgl/nn/pytorch/conv/gatv2conv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/hgtconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/pnaconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gatedgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/atomicconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/tagconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densegraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/egnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain copying dgl/nn/pytorch/explain/gnnexplainer.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain copying dgl/nn/pytorch/explain/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/agnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gmmconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densesageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/nnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gatedgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/tagconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densegraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying 
dgl/nn/tensorflow/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv creating build/lib.linux-x86_64-3.7/dgl/contrib/sampling copying dgl/contrib/sampling/sampler.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling copying dgl/contrib/sampling/dis_sampler.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling copying dgl/contrib/sampling/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling creating build/lib.linux-x86_64-3.7/dgl/contrib/data copying dgl/contrib/data/knowledge_graph.py -> build/lib.linux-x86_64-3.7/dgl/contrib/data copying dgl/contrib/data/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib/data creating build/lib.linux-x86_64-3.7/dgl/optim/pytorch copying dgl/optim/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/pytorch copying dgl/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/optim/pytorch creating build/lib.linux-x86_64-3.7/dgl/optim/mxnet copying dgl/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/mxnet creating build/lib.linux-x86_64-3.7/dgl/optim/tensorflow copying dgl/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/tensorflow creating build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch copying dgl/_dataloading/pytorch/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch copying dgl/_dataloading/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch creating build/lib.linux-x86_64-3.7/dgl/backend/pytorch copying dgl/backend/pytorch/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch copying dgl/backend/pytorch/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch copying dgl/backend/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch creating build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet creating build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow creating build/lib.linux-x86_64-3.7/dgl/distributed/nn copying dgl/distributed/nn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn creating build/lib.linux-x86_64-3.7/dgl/distributed/optim copying dgl/distributed/optim/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch copying dgl/distributed/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch copying dgl/distributed/nn/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch creating 
build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet copying dgl/distributed/nn/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow copying dgl/distributed/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/utils.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet copying dgl/distributed/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow copying dgl/distributed/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/spmv.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/degree_bucketing.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/scheduler.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/adapter.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/runtime.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/program.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/executor.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/var.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/registry.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir creating build/lib.linux-x86_64-3.7/dgl/distgnn/partition copying dgl/distgnn/partition/libra_partition.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/partition copying dgl/distgnn/partition/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/partition creating build/lib.linux-x86_64-3.7/dgl/distgnn/tools copying dgl/distgnn/tools/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/tools copying dgl/distgnn/tools/tools.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/tools creating build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/function.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/types.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/ndarray.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/object.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3 copying dgl/_ffi/_cy3/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3 creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2 copying dgl/_ffi/_cy2/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2 creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cython copying 
dgl/_ffi/_cython/core.cpp -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cython running build_ext building 'dgl._ffi._cy3.core' extension creating build/temp.linux-x86_64-3.7 creating build/temp.linux-x86_64-3.7/dgl creating build/temp.linux-x86_64-3.7/dgl/_ffi creating build/temp.linux-x86_64-3.7/dgl/_ffi/_cython gcc -pthread -B /opt/conda/envs/tensorflow-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/tensorflow-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++ g++ -pthread -B /opt/conda/envs/tensorflow-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/tensorflow-ci/compiler_compat -L/opt/conda/envs/tensorflow-ci/lib -Wl,-rpath=/opt/conda/envs/tensorflow-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 creating build/bdist.linux-x86_64 creating build/bdist.linux-x86_64/egg creating build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/sp_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/diag_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/reduction.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/__init__.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/elementwise_op_sp.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse creating build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-3.7/dgl/geometry/edge_coarsening.py -> build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-3.7/dgl/geometry/fps.py -> build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-3.7/dgl/geometry/capi.py -> build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-3.7/dgl/geometry/__init__.py -> build/bdist.linux-x86_64/egg/dgl/geometry creating build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/function/base.py -> build/bdist.linux-x86_64/egg/dgl/function copying 
build/lib.linux-x86_64-3.7/dgl/function/message.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/function/reducer.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/function/__init__.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/partition.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/nn creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/transe.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/edgepred.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/transr.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/sparse_emb.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/linear.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/agnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/dgnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/grouprevres.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/twirlsconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/egatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gcn2conv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/dotgatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/cfconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gmmconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densesageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying 
build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gineconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/nnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatv2conv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/hgtconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/pnaconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatedgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/atomicconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/tagconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densegraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/egnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain/gnnexplainer.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/factory.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/agnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying 
build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gmmconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densesageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/nnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gatedgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/tagconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densegraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying 
build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/nn/functional copying build/lib.linux-x86_64-3.7/dgl/nn/functional/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/functional copying build/lib.linux-x86_64-3.7/dgl/core.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/subgraph.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/contrib/dis_kvstore.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/contrib/unified_tensor.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/contrib/graph_store.py -> build/bdist.linux-x86_64/egg/dgl/contrib creating build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/sampler.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/dis_sampler.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling creating build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-3.7/dgl/contrib/data/knowledge_graph.py -> build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-3.7/dgl/contrib/data/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-3.7/dgl/contrib/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/traversal.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/checks.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/exception.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/pin_memory.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/internal.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/filter.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/data.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/__init__.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/shared_mem.py -> build/bdist.linux-x86_64/egg/dgl/utils creating build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-3.7/dgl/multiprocessing/pytorch.py -> build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-3.7/dgl/multiprocessing/__init__.py -> build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-3.7/dgl/base.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/optim creating build/bdist.linux-x86_64/egg/dgl/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/optim/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/optim/pytorch/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/optim/pytorch creating build/bdist.linux-x86_64/egg/dgl/optim/mxnet copying build/lib.linux-x86_64-3.7/dgl/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/mxnet 
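The recurring linker lines "unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 / 0xc0010002", seen in both the mxnet-ci link earlier and the tensorflow-ci link above, come from the conda compiler_compat ld being older than the toolchain that built libstdc++.so and libgcc_s.so.1: it does not recognize the newer .note.gnu.property sections (x86 ISA/feature notes) in those libraries. They are warnings only, and both links complete. One way to inspect the notes the old ld is complaining about, assuming binutils' readelf is on PATH (illustrative; the library path is the one named in the warnings):

    import subprocess
    # Dump the ELF notes of the library from the warnings; the
    # .note.gnu.property entries are what the compatibility ld does
    # not understand.
    out = subprocess.run(
        ["readelf", "-n", "/opt/conda/envs/tensorflow-ci/lib/libstdc++.so"],
        capture_output=True, text=True, check=False)
    print(out.stdout)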
copying build/lib.linux-x86_64-3.7/dgl/optim/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim creating build/bdist.linux-x86_64/egg/dgl/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/heterograph_index.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/sparse.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/global_config.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/base.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/graphsaint.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/shadow.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/negative_sampler.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/__init__.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/cluster_gcn.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/neighbor_sampler.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/dist_dataloader.py -> build/bdist.linux-x86_64/egg/dgl/dataloading creating build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/neighbor.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading creating build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying build/lib.linux-x86_64-3.7/dgl/_dataloading/shadow.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/negative_sampler.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/cluster_gcn.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_api_internal.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/heterograph.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/backend copying build/lib.linux-x86_64-3.7/dgl/backend/backend.py -> build/bdist.linux-x86_64/egg/dgl/backend creating build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch creating build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying 
build/lib.linux-x86_64-3.7/dgl/backend/mxnet/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend creating build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/set_default_backend.py -> build/bdist.linux-x86_64/egg/dgl/backend copying build/lib.linux-x86_64-3.7/dgl/network.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/edge_softmax.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/sddmm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/gather_mm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/segment.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/__init__.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/spmm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/logging.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/graph_index.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-3.7/dgl/cuda/nccl.py -> build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-3.7/dgl/cuda/__init__.py -> build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-3.7/dgl/udf.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/pytorch_tensor.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/base.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/tensor.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/__init__.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/numpy.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/ndarray.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/init.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/view.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/neighbor.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/pinsage.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying 
build/lib.linux-x86_64-3.7/dgl/sampling/negative.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/randomwalks.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/utils.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/node2vec_randomwalk.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/__init__.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/generators.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/qm7b.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/graph_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/dgl_dataset.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/csv_dataset_base.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/knowledge_graph.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/qm9_edge.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/gnn_benchmark.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/qm9.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/icews18.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/gdelt.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/utils.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/sbm.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/csv_dataset.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/karate.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/citation_graph.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/rdf.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/bitcoinotc.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/ppi.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/tensor_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/flickr.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/wikics.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/heterograph_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/tu.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/__init__.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/gindt.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/tree.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/synthetic.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/adapter.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/fakenews.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/fraud.py -> 
build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/minigc.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/yelp.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/reddit.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/convert.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/container.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/merge.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/readout.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/__init__.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_graph.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/graph_partition_book.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/partition.py -> build/bdist.linux-x86_64/egg/dgl/distributed creating build/bdist.linux-x86_64/egg/dgl/distributed/nn creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch/sparse_emb.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/distributed/optim creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/utils.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/distributed/standalone_kvstore.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/graph_services.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/role.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/constants.py 
-> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/rpc_server.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/kvstore.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_context.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/rpc.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/rpc_client.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/id_map.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/server_state.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/shared_mem_utils.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_tensor.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_dataloader.py -> build/bdist.linux-x86_64/egg/dgl/distributed creating build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/nodeflow.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/udf.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/view.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/kernel.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/spmv.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/degree_bucketing.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/scheduler.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/adapter.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/runtime.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/program.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/executor.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/var.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/registry.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/graph.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying 
build/lib.linux-x86_64-3.7/dgl/_deprecate/frame.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/propagate.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/batch.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-3.7/dgl/transforms/functional.py -> build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-3.7/dgl/transforms/__init__.py -> build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-3.7/dgl/transforms/module.py -> build/bdist.linux-x86_64/egg/dgl/transforms creating build/bdist.linux-x86_64/egg/dgl/distgnn creating build/bdist.linux-x86_64/egg/dgl/distgnn/partition copying build/lib.linux-x86_64-3.7/dgl/distgnn/partition/libra_partition.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/partition copying build/lib.linux-x86_64-3.7/dgl/distgnn/partition/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/partition creating build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-3.7/dgl/distgnn/tools/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-3.7/dgl/distgnn/tools/tools.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-3.7/dgl/distgnn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn creating build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/runtime_ctypes.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/function.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/libinfo.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/streams.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/base.py -> build/bdist.linux-x86_64/egg/dgl/_ffi creating build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/function.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/types.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/ndarray.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/object.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/ndarray.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/object_generic.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/object.py -> build/bdist.linux-x86_64/egg/dgl/_ffi creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3 copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3 copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3 creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2 copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2 creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cython copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cython/core.cpp -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cython 
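Note that both the Cython-generated C++ source (dgl/_ffi/_cython/core.cpp) and the prebuilt extension module (dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so) are staged into the egg. The "[1/1] Cythonizing dgl/_ffi/_cython/core.pyx" step later in this stage is what regenerates that C++ source; as a rough standalone sketch (DGL presumably drives this through cythonize() in setup.py rather than the CLI):

    cython --cplus dgl/_ffi/_cython/core.pyx -o dgl/_ffi/_cython/core.cpp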
copying build/lib.linux-x86_64-3.7/dgl/frame.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/random.py -> build/bdist.linux-x86_64/egg/dgl byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/sp_matrix.py to sp_matrix.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/diag_matrix.py to diag_matrix.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/reduction.py to reduction.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/elementwise_op_sp.py to elementwise_op_sp.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/edge_coarsening.py to edge_coarsening.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/fps.py to fps.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/capi.py to capi.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/message.py to message.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/reducer.py to reducer.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/partition.py to partition.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/glob.py to glob.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transe.py to transe.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/edgepred.py to edgepred.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transr.py to transr.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/hetero.py to hetero.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/sparse_emb.py to sparse_emb.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/linear.py to linear.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/agnnconv.py to agnnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/relgraphconv.py to relgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/appnpconv.py to appnpconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dgnconv.py to dgnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/grouprevres.py to grouprevres.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/twirlsconv.py to twirlsconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egatconv.py to egatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/graphconv.py to graphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densechebconv.py to densechebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gcn2conv.py to gcn2conv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/ginconv.py to 
ginconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dotgatconv.py to dotgatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/cfconv.py to cfconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gmmconv.py to gmmconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sageconv.py to sageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densesageconv.py to densesageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gineconv.py to gineconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/edgeconv.py to edgeconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/nnconv.py to nnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatv2conv.py to gatv2conv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/hgtconv.py to hgtconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/pnaconv.py to pnaconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatedgraphconv.py to gatedgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/atomicconv.py to atomicconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/tagconv.py to tagconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densegraphconv.py to densegraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egnnconv.py to egnnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/chebconv.py to chebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sgconv.py to sgconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatconv.py to gatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/gnnexplainer.py to gnnexplainer.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/factory.py to factory.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/softmax.py to softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/glob.py to glob.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/hetero.py to hetero.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/agnnconv.py to agnnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/relgraphconv.py to relgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/appnpconv.py to appnpconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/graphconv.py to graphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densechebconv.py to densechebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/ginconv.py to ginconv.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gmmconv.py to gmmconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sageconv.py to sageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densesageconv.py to densesageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/edgeconv.py to edgeconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/nnconv.py to nnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatedgraphconv.py to gatedgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/tagconv.py to tagconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densegraphconv.py to densegraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/chebconv.py to chebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sgconv.py to sgconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatconv.py to gatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/softmax.py to softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/glob.py to glob.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/hetero.py to hetero.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/relgraphconv.py to relgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/appnpconv.py to appnpconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/graphconv.py to graphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/densechebconv.py to densechebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/ginconv.py to ginconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sageconv.py to sageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/edgeconv.py to edgeconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/chebconv.py to chebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sgconv.py to sgconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/gatconv.py to gatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/softmax.py to softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/functional/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/core.py to core.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/subgraph.py to subgraph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/dis_kvstore.py to dis_kvstore.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/contrib/unified_tensor.py to unified_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/graph_store.py to graph_store.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/sampler.py to sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/dis_sampler.py to dis_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/traversal.py to traversal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/checks.py to checks.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/exception.py to exception.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/pin_memory.py to pin_memory.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/internal.py to internal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/filter.py to filter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/data.py to data.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/shared_mem.py to shared_mem.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/pytorch.py to pytorch.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph_index.py to heterograph_index.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/global_config.py to global_config.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/graphsaint.py to graphsaint.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/shadow.py to shadow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/neighbor_sampler.py to neighbor_sampler.cpython-37.pyc 
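The surrounding "byte-compiling ... to ....cpython-37.pyc" lines are setuptools pre-compiling every staged .py file to CPython 3.7 bytecode so the egg ships ready to import. The standard-library equivalent, sketched over the staging directory used above:

    python3 -m compileall -q build/bdist.linux-x86_64/egg   # quietly compiles every .py in the tree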
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dist_dataloader.py to dist_dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/neighbor.py to neighbor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/shadow.py to shadow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_api_internal.py to _api_internal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph.py to heterograph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/backend.py to backend.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/set_default_backend.py to set_default_backend.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/network.py to network.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/edge_softmax.py to edge_softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/sddmm.py to sddmm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/gather_mm.py to gather_mm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/segment.py to segment.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/spmm.py to spmm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/logging.py to logging.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/graph_index.py to graph_index.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/nccl.py to nccl.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/__init__.py to 
__init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/udf.py to udf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/pytorch_tensor.py to pytorch_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/numpy.py to numpy.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/init.py to init.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/view.py to view.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/neighbor.py to neighbor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/pinsage.py to pinsage.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/negative.py to negative.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/randomwalks.py to randomwalks.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/node2vec_randomwalk.py to node2vec_randomwalk.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/generators.py to generators.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm7b.py to qm7b.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/graph_serialize.py to graph_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/dgl_dataset.py to dgl_dataset.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset_base.py to csv_dataset_base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9_edge.py to qm9_edge.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gnn_benchmark.py to gnn_benchmark.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9.py to qm9.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/icews18.py to icews18.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gdelt.py to gdelt.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/sbm.py to sbm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset.py to csv_dataset.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/karate.py to karate.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/citation_graph.py to citation_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/rdf.py to rdf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/bitcoinotc.py to bitcoinotc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/ppi.py to ppi.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tensor_serialize.py to tensor_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/flickr.py to flickr.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/wikics.py to 
wikics.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/heterograph_serialize.py to heterograph_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tu.py to tu.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gindt.py to gindt.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tree.py to tree.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/synthetic.py to synthetic.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/adapter.py to adapter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fakenews.py to fakenews.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fraud.py to fraud.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/minigc.py to minigc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/yelp.py to yelp.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/reddit.py to reddit.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/convert.py to convert.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/container.py to container.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/merge.py to merge.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/readout.py to readout.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_graph.py to dist_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_partition_book.py to graph_partition_book.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/partition.py to partition.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/sparse_emb.py to sparse_emb.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/standalone_kvstore.py to standalone_kvstore.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_services.py to graph_services.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/role.py to role.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/constants.py to constants.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/distributed/rpc_server.py to rpc_server.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/kvstore.py to kvstore.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_context.py to dist_context.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc.py to rpc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_client.py to rpc_client.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/id_map.py to id_map.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/server_state.py to server_state.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/shared_mem_utils.py to shared_mem_utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_tensor.py to dist_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_dataloader.py to dist_dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/nodeflow.py to nodeflow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/udf.py to udf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/view.py to view.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/kernel.py to kernel.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/spmv.py to spmv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/degree_bucketing.py to degree_bucketing.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/scheduler.py to scheduler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/adapter.py to adapter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/runtime.py to runtime.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/program.py to program.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/executor.py to executor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/var.py to var.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/registry.py to registry.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/graph.py to graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/frame.py to frame.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/propagate.py to propagate.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/batch.py to batch.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/functional.py to functional.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/module.py to module.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/libra_partition.py to libra_partition.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/distgnn/partition/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/tools.py to tools.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/runtime_ctypes.py to runtime_ctypes.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/function.py to function.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/libinfo.py to libinfo.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/streams.py to streams.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/function.py to function.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/types.py to types.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/object.py to object.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object_generic.py to object_generic.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object.py to object.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/frame.py to frame.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/random.py to random.cpython-37.pyc creating stub loader for dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/core.py to core.cpython-37.pyc installing package data to build/bdist.linux-x86_64/egg running install_data copying ../build/libdgl.so -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/tensoradapter creating build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch copying ../build/tensoradapter/pytorch/libtensoradapter_pytorch_1.9.0.so -> build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch creating build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/not-zip-safe -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO writing build/bdist.linux-x86_64/egg/EGG-INFO/native_libs.txt creating dist creating 'dist/dgl-0.9-py3.7-linux-x86_64.egg' and adding 'build/bdist.linux-x86_64/egg' to it removing 'build/bdist.linux-x86_64/egg' (and everything under it) Processing dgl-0.9-py3.7-linux-x86_64.egg creating /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg Extracting dgl-0.9-py3.7-linux-x86_64.egg to 
/opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Adding dgl 0.9 to easy-install.pth file Installed /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg Processing dependencies for dgl==0.9 Searching for psutil>=5.8.0 Reading https://pypi.org/simple/psutil/ Downloading https://files.pythonhosted.org/packages/3d/73/d8c87b5612c58d1e6c6d91997c1590771d34e4ee27d9c11eb1e64ecbf365/psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=4fb54941aac044a61db9d8eb56fc5bee207db3bc58645d657249030e15ba3727 Best match: psutil 5.9.2 Processing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl Installing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl to /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Adding psutil 5.9.2 to easy-install.pth file Installed /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/psutil-5.9.2-py3.7-linux-x86_64.egg Searching for tqdm==4.64.0 Best match: tqdm 4.64.0 Adding tqdm 4.64.0 to easy-install.pth file Installing tqdm script to /opt/conda/envs/tensorflow-ci/bin Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for requests==2.28.1 Best match: requests 2.28.1 Adding requests 2.28.1 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for networkx==2.6.3 Best match: networkx 2.6.3 Adding networkx 2.6.3 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for scipy==1.4.1 Best match: scipy 1.4.1 Adding scipy 1.4.1 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for numpy==1.18.5 Best match: numpy 1.18.5 Adding numpy 1.18.5 to easy-install.pth file Installing f2py script to /opt/conda/envs/tensorflow-ci/bin Installing f2py3 script to /opt/conda/envs/tensorflow-ci/bin Installing f2py3.7 script to /opt/conda/envs/tensorflow-ci/bin Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for certifi==2022.6.15 Best match: certifi 2022.6.15 Adding certifi 2022.6.15 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for urllib3==1.26.11 Best match: urllib3 1.26.11 Adding urllib3 1.26.11 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for idna==3.3 Best match: idna 3.3 Adding idna 3.3 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for charset-normalizer==2.1.0 Best match: charset-normalizer 2.1.0 Adding charset-normalizer 2.1.0 to easy-install.pth file Installing normalizer script to /opt/conda/envs/tensorflow-ci/bin Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Finished processing dependencies for dgl==0.9 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /root/jenkins/workspace/dgl_PR-4648/python/dgl/_ffi/_cython/core.pyx tree = Parsing.p_module(s, pxd, full_module_name) /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/command/install.py:37: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools. 
setuptools.SetuptoolsDeprecationWarning, /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/command/easy_install.py:147: EasyInstallDeprecationWarning: easy_install command is deprecated. Use build and pip and other standards-based tools. EasyInstallDeprecationWarning, /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, [1/1] Cythonizing dgl/_ffi/_cython/core.pyx running build_ext building 'dgl._ffi._cy3.core' extension gcc -pthread -B /opt/conda/envs/tensorflow-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/tensorflow-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++ g++ -pthread -B /opt/conda/envs/tensorflow-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/tensorflow-ci/compiler_compat -L/opt/conda/envs/tensorflow-ci/lib -Wl,-rpath=/opt/conda/envs/tensorflow-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so -> dgl/_ffi/_cy3 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! 
/opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /root/jenkins/workspace/dgl_PR-4648/python/dgl/_ffi/_cython/core.pyx
  tree = Parsing.p_module(s, pxd, full_module_name)
~/jenkins/workspace/dgl_PR-4648
[Pipeline] sh
+ ls -lh /usr/lib/x86_64-linux-gnu/
total 654M
-rw-r--r-- 1 root root 496 May 3 10:19 Mcrt1.o -rw-r--r-- 1 root root 1.8K May 3 10:19 Scrt1.o drwxr-xr-x 2 root root 4.0K Aug 1 13:21 audit drwxr-xr-x 2 root root 4.0K Aug 16 08:41 avahi drwxr-xr-x 2 root root 4.0K Aug 1 13:21 coreutils -rw-r--r-- 1 root root 1.9K May 3 10:19 crt1.o -rw-r--r-- 1 root root 1.2K May 3 10:19 crti.o -rw-r--r-- 1 root root 648 May 3 10:19 crtn.o drwxr-xr-x 2 root root 4.0K Aug 16 08:41 dri drwxr-xr-x 2 root root 4.0K Aug 16 08:40 engines-1.1 drwxr-xr-x 2 root root 12K Aug 1 13:21 gconv -rw-r--r-- 1 root root 2.5K May 3 10:19 gcrt1.o drwxr-xr-x 2 root root 4.0K Aug 16 08:41 gdcm-2.8 drwxr-xr-x 3 root root 4.0K Aug 16 08:40 gdk-pixbuf-2.0 drwxr-xr-x 3 root root 4.0K Aug 16 08:40 gio drwxr-xr-x 2 root root 4.0K Aug 16 08:41 girepository-1.0 drwxr-xr-x 3 root root 4.0K Aug 16 08:41 glib-2.0 drwxr-xr-x 2 root root 4.0K Aug 16 08:41 graphviz -rw-r--r-- 1 root root 2.3K May 3 10:19 grcrt1.o drwxr-xr-x 3 root root 4.0K Aug 16 08:41 gtk-2.0 drwxr-xr-x 3 root root 4.0K Aug 16 08:41 gtk-3.0 drwxr-xr-x 2 root root 4.0K Aug 16 08:41 hwloc drwxr-xr-x 3 root root 4.0K Aug 16 08:41 icu drwxr-xr-x 3 root root 4.0K Aug 16 08:40 krb5 drwxr-xr-x 2 root root 4.0K Aug 16 08:40 ldscripts -rw-r--r-- 1 root root 2.0K May 3 10:19 libBrokenLocale.a lrwxrwxrwx 1 root root 42 May 3 10:19 libBrokenLocale.so -> /lib/x86_64-linux-gnu/libBrokenLocale.so.1 lrwxrwxrwx 1 root root 16 May 23 2016 libCharLS.so.1 -> libCharLS.so.1.0 -rw-r--r-- 1 root root 219K May 23 2016 libCharLS.so.1.0 lrwxrwxrwx 1 root root 14 May 10 2019 libGL.so.1 -> libGL.so.1.0.0 -rw-r--r-- 1 root root 555K May 10 2019 libGL.so.1.0.0 lrwxrwxrwx 1 root root 15 May 10 2019 libGLX.so.0 -> libGLX.so.0.0.0 -rw-r--r-- 1 root root 67K May 10 2019 libGLX.so.0.0.0 lrwxrwxrwx 1 root root 16 Jun 12 2020 libGLX_indirect.so.0 -> libGLX_mesa.so.0 lrwxrwxrwx 1 root root 20 Jun 12 2020 libGLX_mesa.so.0 -> libGLX_mesa.so.0.0.0 -rw-r--r-- 1 root root 477K Jun 12 2020 libGLX_mesa.so.0.0.0 lrwxrwxrwx 1 root root 22 May 10 2019 libGLdispatch.so.0 -> libGLdispatch.so.0.0.0 -rw-r--r-- 1 root root 599K May 10 2019 libGLdispatch.so.0.0.0 lrwxrwxrwx 1 root root 17 Apr 16 2016 libHalf.so -> libHalf.so.12.0.0 lrwxrwxrwx 1 root root 17 Apr 16 2016 libHalf.so.12 -> libHalf.so.12.0.0 -rw-r--r-- 1 root root 267K Apr 16 2016 libHalf.so.12.0.0 lrwxrwxrwx 1 root root 15 Feb 28 2017 libICE.so.6 -> libICE.so.6.3.0 -rw-r--r-- 1 root root 96K Feb 28 2017 libICE.so.6.3.0 lrwxrwxrwx 1 root root 20 Apr 16 2016 libIex-2_2.so.12 -> libIex-2_2.so.12.0.0 -rw-r--r-- 1 root root 119K Apr 16 2016 libIex-2_2.so.12.0.0 lrwxrwxrwx 1 root root 20 Apr 16 2016 libIex.so -> libIex-2_2.so.12.0.0 lrwxrwxrwx 1 root root 24 Apr 16 2016 libIexMath-2_2.so.12 -> libIexMath-2_2.so.12.0.0 -rw-r--r-- 1 root root 15K Apr 16 2016 libIexMath-2_2.so.12.0.0 lrwxrwxrwx 1 root root 24 Apr 16 2016 libIexMath.so -> libIexMath-2_2.so.12.0.0 lrwxrwxrwx 1 root root 23 Nov 16 2021 libIlmImf-2_2.so.22 -> libIlmImf-2_2.so.22.0.0 -rw-r--r-- 1 root root 2.8M Nov 16 2021 libIlmImf-2_2.so.22.0.0 -rw-r--r-- 1 root root 4.4M Nov 16 2021 libIlmImf.a lrwxrwxrwx 1 root root 23 Nov 16 2021 libIlmImf.so -> libIlmImf-2_2.so.22.0.0 lrwxrwxrwx 1 root root 27 Nov 16 2021 libIlmImfUtil-2_2.so.22 -> libIlmImfUtil-2_2.so.22.0.0 -rw-r--r-- 1 root root 140K Nov 16 2021 libIlmImfUtil-2_2.so.22.0.0 -rw-r--r-- 1
root root 319K Nov 16 2021 libIlmImfUtil.a lrwxrwxrwx 1 root root 27 Nov 16 2021 libIlmImfUtil.so -> libIlmImfUtil-2_2.so.22.0.0 lrwxrwxrwx 1 root root 26 Apr 16 2016 libIlmThread-2_2.so.12 -> libIlmThread-2_2.so.12.0.0 -rw-r--r-- 1 root root 27K Apr 16 2016 libIlmThread-2_2.so.12.0.0 lrwxrwxrwx 1 root root 26 Apr 16 2016 libIlmThread.so -> libIlmThread-2_2.so.12.0.0 lrwxrwxrwx 1 root root 22 Apr 16 2016 libImath-2_2.so.12 -> libImath-2_2.so.12.0.0 -rw-r--r-- 1 root root 71K Apr 16 2016 libImath-2_2.so.12.0.0 lrwxrwxrwx 1 root root 22 Apr 16 2016 libImath.so -> libImath-2_2.so.12.0.0 lrwxrwxrwx 1 root root 15 Jul 14 2020 libLLVM-10.so -> libLLVM-10.so.1 -rw-r--r-- 1 root root 71M Jul 14 2020 libLLVM-10.so.1 lrwxrwxrwx 1 root root 18 Apr 5 2017 libOpenCL.so.1 -> libOpenCL.so.1.0.0 -rw-r--r-- 1 root root 43K Apr 5 2017 libOpenCL.so.1.0.0 lrwxrwxrwx 1 root root 14 Jul 13 2014 libSM.so.6 -> libSM.so.6.0.1 -rw-r--r-- 1 root root 31K Jul 13 2014 libSM.so.6.0.1 lrwxrwxrwx 1 root root 19 May 19 2021 libX11-xcb.so.1 -> libX11-xcb.so.1.0.0 -rw-r--r-- 1 root root 5.7K May 19 2021 libX11-xcb.so.1.0.0 lrwxrwxrwx 1 root root 15 May 19 2021 libX11.so.6 -> libX11.so.6.3.0 -rw-r--r-- 1 root root 1.3M May 19 2021 libX11.so.6.3.0 lrwxrwxrwx 1 root root 15 Apr 21 2020 libXau.so.6 -> libXau.so.6.0.0 -rw-r--r-- 1 root root 15K Apr 21 2020 libXau.so.6.0.0 lrwxrwxrwx 1 root root 12 Aug 3 2015 libXaw.so.7 -> libXaw7.so.7 lrwxrwxrwx 1 root root 16 Aug 3 2015 libXaw7.so.7 -> libXaw7.so.7.0.0 -rw-r--r-- 1 root root 461K Aug 3 2015 libXaw7.so.7.0.0 lrwxrwxrwx 1 root root 22 Jan 20 2017 libXcomposite.so.1 -> libXcomposite.so.1.0.0 -rw-r--r-- 1 root root 11K Jan 20 2017 libXcomposite.so.1.0.0 lrwxrwxrwx 1 root root 19 Dec 19 2017 libXcursor.so.1 -> libXcursor.so.1.0.2 -rw-r--r-- 1 root root 39K Dec 19 2017 libXcursor.so.1.0.2 lrwxrwxrwx 1 root root 19 Aug 20 2017 libXdamage.so.1 -> libXdamage.so.1.1.0 -rw-r--r-- 1 root root 11K Aug 20 2017 libXdamage.so.1.1.0 lrwxrwxrwx 1 root root 17 Mar 2 2017 libXdmcp.so.6 -> libXdmcp.so.6.0.0 -rw-r--r-- 1 root root 23K Mar 2 2017 libXdmcp.so.6.0.0 lrwxrwxrwx 1 root root 16 Oct 25 2014 libXext.so.6 -> libXext.so.6.4.0 -rw-r--r-- 1 root root 72K Oct 25 2014 libXext.so.6.4.0 lrwxrwxrwx 1 root root 18 Dec 5 2016 libXfixes.so.3 -> libXfixes.so.3.1.0 -rw-r--r-- 1 root root 23K Dec 5 2016 libXfixes.so.3.1.0 lrwxrwxrwx 1 root root 15 Jul 13 2014 libXft.so.2 -> libXft.so.2.3.2 -rw-r--r-- 1 root root 84K Jul 13 2014 libXft.so.2.3.2 lrwxrwxrwx 1 root root 14 Jan 23 2017 libXi.so.6 -> libXi.so.6.1.0 -rw-r--r-- 1 root root 63K Jan 23 2017 libXi.so.6.1.0 lrwxrwxrwx 1 root root 20 Jun 30 2013 libXinerama.so.1 -> libXinerama.so.1.0.0 -rw-r--r-- 1 root root 11K Jun 30 2013 libXinerama.so.1.0.0 lrwxrwxrwx 1 root root 15 Dec 10 2015 libXmu.so.6 -> libXmu.so.6.2.0 -rw-r--r-- 1 root root 101K Dec 10 2015 libXmu.so.6.2.0 lrwxrwxrwx 1 root root 16 Dec 22 2016 libXpm.so.4 -> libXpm.so.4.11.0 -rw-r--r-- 1 root root 71K Dec 22 2016 libXpm.so.4.11.0 lrwxrwxrwx 1 root root 18 Dec 6 2016 libXrandr.so.2 -> libXrandr.so.2.2.0 -rw-r--r-- 1 root root 43K Dec 6 2016 libXrandr.so.2.2.0 lrwxrwxrwx 1 root root 19 Dec 5 2016 libXrender.so.1 -> libXrender.so.1.3.0 -rw-r--r-- 1 root root 39K Dec 5 2016 libXrender.so.1.3.0 lrwxrwxrwx 1 root root 15 May 9 2012 libXss.so.1 -> libXss.so.1.0.0 -rw-r--r-- 1 root root 15K May 9 2012 libXss.so.1.0.0 lrwxrwxrwx 1 root root 14 May 24 2016 libXt.so.6 -> libXt.so.6.0.0 -rw-r--r-- 1 root root 416K May 24 2016 libXt.so.6.0.0 lrwxrwxrwx 1 root root 19 May 6 2015 libXxf86vm.so.1 -> 
libXxf86vm.so.1.0.0 -rw-r--r-- 1 root root 23K May 6 2015 libXxf86vm.so.1.0.0 lrwxrwxrwx 1 root root 15 Jul 28 2017 libaec.so.0 -> libaec.so.0.0.3 -rw-r--r-- 1 root root 30K Jul 28 2017 libaec.so.0.0.3 -rw-r--r-- 1 root root 23K May 3 10:19 libanl.a lrwxrwxrwx 1 root root 33 May 3 10:19 libanl.so -> /lib/x86_64-linux-gnu/libanl.so.1 lrwxrwxrwx 1 root root 20 Jun 15 2021 libapt-inst.so.2.0 -> libapt-inst.so.2.0.0 -rw-r--r-- 1 root root 51K Jun 15 2021 libapt-inst.so.2.0.0 lrwxrwxrwx 1 root root 19 Jun 15 2021 libapt-pkg.so.5.0 -> libapt-pkg.so.5.0.2 -rw-r--r-- 1 root root 1.8M Jun 15 2021 libapt-pkg.so.5.0.2 lrwxrwxrwx 1 root root 23 Jun 15 2021 libapt-private.so.0.0 -> libapt-private.so.0.0.0 -rw-r--r-- 1 root root 415K Jun 15 2021 libapt-private.so.0.0.0 lrwxrwxrwx 1 root root 18 Jan 24 2018 libarpack.so.2 -> libarpack.so.2.0.0 -rw-r--r-- 1 root root 295K Jan 24 2018 libarpack.so.2.0.0 lrwxrwxrwx 1 root root 16 Dec 4 2019 libasan.so.4 -> libasan.so.4.0.0 -rw-r--r-- 1 root root 1.4M Dec 4 2019 libasan.so.4.0.0 lrwxrwxrwx 1 root root 16 Dec 15 2017 libasn1.so.8 -> libasn1.so.8.0.0 -rw-r--r-- 1 root root 647K Dec 15 2017 libasn1.so.8.0.0 lrwxrwxrwx 1 root root 23 Mar 13 2018 libatk-1.0.so.0 -> libatk-1.0.so.0.22810.1 -rw-r--r-- 1 root root 150K Mar 13 2018 libatk-1.0.so.0.22810.1 lrwxrwxrwx 1 root root 26 Mar 13 2018 libatk-bridge-2.0.so.0 -> libatk-bridge-2.0.so.0.0.0 -rw-r--r-- 1 root root 194K Mar 13 2018 libatk-bridge-2.0.so.0.0.0 lrwxrwxrwx 1 root root 18 Mar 10 2020 libatomic.so.1 -> libatomic.so.1.2.0 -rw-r--r-- 1 root root 27K Mar 10 2020 libatomic.so.1.2.0 lrwxrwxrwx 1 root root 17 Mar 13 2018 libatspi.so.0 -> libatspi.so.0.0.1 -rw-r--r-- 1 root root 191K Mar 13 2018 libatspi.so.0.0.1 lrwxrwxrwx 1 root root 24 Jul 6 2021 libavahi-client.so.3 -> libavahi-client.so.3.2.9 -rw-r--r-- 1 root root 67K Jul 6 2021 libavahi-client.so.3.2.9 lrwxrwxrwx 1 root root 24 Jul 6 2021 libavahi-common.so.3 -> libavahi-common.so.3.5.3 -rw-r--r-- 1 root root 47K Jul 6 2021 libavahi-common.so.3.5.3 -rw-r--r-- 1 root root 19M May 18 20:01 libavcodec.a lrwxrwxrwx 1 root root 24 May 18 20:01 libavcodec.so -> libavcodec.so.57.107.100 lrwxrwxrwx 1 root root 24 May 18 20:01 libavcodec.so.57 -> libavcodec.so.57.107.100 -rw-r--r-- 1 root root 14M May 18 20:01 libavcodec.so.57.107.100 -rw-r--r-- 1 root root 4.7M May 18 20:01 libavformat.a lrwxrwxrwx 1 root root 24 May 18 20:01 libavformat.so -> libavformat.so.57.83.100 lrwxrwxrwx 1 root root 24 May 18 20:01 libavformat.so.57 -> libavformat.so.57.83.100 -rw-r--r-- 1 root root 2.4M May 18 20:01 libavformat.so.57.83.100 -rw-r--r-- 1 root root 214K May 18 20:01 libavresample.a lrwxrwxrwx 1 root root 22 May 18 20:01 libavresample.so -> libavresample.so.3.7.0 lrwxrwxrwx 1 root root 22 May 18 20:01 libavresample.so.3 -> libavresample.so.3.7.0 -rw-r--r-- 1 root root 130K May 18 20:01 libavresample.so.3.7.0 -rw-r--r-- 1 root root 755K May 18 20:01 libavutil.a lrwxrwxrwx 1 root root 22 May 18 20:01 libavutil.so -> libavutil.so.55.78.100 lrwxrwxrwx 1 root root 22 May 18 20:01 libavutil.so.55 -> libavutil.so.55.78.100 -rw-r--r-- 1 root root 487K May 18 20:01 libavutil.so.55.78.100 -rw-r--r-- 1 root root 1.3M Oct 20 2021 libbfd-2.30-system.so lrwxrwxrwx 1 root root 44 Aug 16 08:41 libblas.a -> /etc/alternatives/libblas.a-x86_64-linux-gnu lrwxrwxrwx 1 root root 45 Aug 16 08:41 libblas.so -> /etc/alternatives/libblas.so-x86_64-linux-gnu lrwxrwxrwx 1 root root 47 Aug 16 08:41 libblas.so.3 -> /etc/alternatives/libblas.so.3-x86_64-linux-gnu lrwxrwxrwx 1 root root 18 Apr 3 2018 
libbluray.so.2 -> libbluray.so.2.0.2 -rw-r--r-- 1 root root 319K Apr 3 2018 libbluray.so.2.0.2 -rw-r--r-- 1 root root 5.3M May 3 10:19 libc.a -rw-r--r-- 1 root root 298 May 3 10:19 libc.so -rw-r--r-- 1 root root 20K May 3 10:19 libc_nonshared.a lrwxrwxrwx 1 root root 29 Jan 22 2019 libcairo-gobject.so.2 -> libcairo-gobject.so.2.11510.0 -rw-r--r-- 1 root root 34K Jan 22 2019 libcairo-gobject.so.2.11510.0 lrwxrwxrwx 1 root root 21 Jan 22 2019 libcairo.so.2 -> libcairo.so.2.11510.0 -rw-r--r-- 1 root root 1.2M Jan 22 2019 libcairo.so.2.11510.0 lrwxrwxrwx 1 root root 15 Mar 10 2020 libcc1.so.0 -> libcc1.so.0.0.0 -rw-r--r-- 1 root root 116K Mar 10 2020 libcc1.so.0.0.0 lrwxrwxrwx 1 root root 15 Mar 24 2018 libcdt.so -> libcdt.so.5.0.0 lrwxrwxrwx 1 root root 15 Mar 24 2018 libcdt.so.5 -> libcdt.so.5.0.0 -rw-r--r-- 1 root root 27K Mar 24 2018 libcdt.so.5.0.0 lrwxrwxrwx 1 root root 18 Mar 24 2018 libcgraph.so -> libcgraph.so.6.0.0 lrwxrwxrwx 1 root root 18 Mar 24 2018 libcgraph.so.6 -> libcgraph.so.6.0.0 -rw-r--r-- 1 root root 88K Mar 24 2018 libcgraph.so.6.0.0 lrwxrwxrwx 1 root root 23 Feb 7 2018 libchromaprint.so.1 -> libchromaprint.so.1.4.3 -rw-r--r-- 1 root root 75K Feb 7 2018 libchromaprint.so.1.4.3 lrwxrwxrwx 1 root root 34 May 3 10:19 libcidn.so -> /lib/x86_64-linux-gnu/libcidn.so.1 lrwxrwxrwx 1 root root 19 Dec 4 2019 libcilkrts.so.5 -> libcilkrts.so.5.0.0 -rw-r--r-- 1 root root 117K Dec 4 2019 libcilkrts.so.5.0.0 lrwxrwxrwx 1 root root 18 Jul 23 2017 libcolord.so.2 -> libcolord.so.2.0.5 -rw-r--r-- 1 root root 126K Jul 23 2017 libcolord.so.2.0.5 lrwxrwxrwx 1 root root 25 Jul 23 2017 libcolordprivate.so.2 -> libcolordprivate.so.2.0.5 -rw-r--r-- 1 root root 188K Jul 23 2017 libcolordprivate.so.2.0.5 lrwxrwxrwx 1 root root 21 Dec 16 2017 libcroco-0.6.so.3 -> libcroco-0.6.so.3.0.1 -rw-r--r-- 1 root root 235K Dec 16 2017 libcroco-0.6.so.3.0.1 -rw-r--r-- 1 root root 61K May 3 10:19 libcrypt.a lrwxrwxrwx 1 root root 35 May 3 10:19 libcrypt.so -> /lib/x86_64-linux-gnu/libcrypt.so.1 -rw-r--r-- 1 root root 2.8M Jul 4 11:25 libcrypto.so.1.1 lrwxrwxrwx 1 root root 19 Mar 13 2017 libcrystalhd.so.3 -> libcrystalhd.so.3.6 -rw-r--r-- 1 root root 109K Mar 13 2017 libcrystalhd.so.3.6 -rw-r--r-- 1 root root 558K May 27 15:03 libcups.so.2 lrwxrwxrwx 1 root root 19 Jun 22 16:00 libcurl-gnutls.so.3 -> libcurl-gnutls.so.4 lrwxrwxrwx 1 root root 23 Jun 22 16:00 libcurl-gnutls.so.4 -> libcurl-gnutls.so.4.5.0 -rw-r--r-- 1 root root 515K Jun 22 16:00 libcurl-gnutls.so.4.5.0 lrwxrwxrwx 1 root root 16 Jun 22 16:00 libcurl.so.4 -> libcurl.so.4.5.0 -rw-r--r-- 1 root root 519K Jun 22 16:00 libcurl.so.4.5.0 lrwxrwxrwx 1 root root 16 Feb 20 2018 libdap.so.25 -> libdap.so.25.0.1 -rw-r--r-- 1 root root 1.6M Feb 20 2018 libdap.so.25.0.1 lrwxrwxrwx 1 root root 21 Feb 20 2018 libdapclient.so.6 -> libdapclient.so.6.1.7 -rw-r--r-- 1 root root 259K Feb 20 2018 libdapclient.so.6.1.7 lrwxrwxrwx 1 root root 18 Feb 14 2018 libdatrie.so.1 -> libdatrie.so.1.3.3 -rw-r--r-- 1 root root 26K Feb 14 2018 libdatrie.so.1.3.3 -rw-r--r-- 1 root root 1.7M Jun 3 2019 libdb-5.3.so -rw-r--r-- 1 root root 365K Jan 15 2017 libdc1394.a lrwxrwxrwx 1 root root 19 Jan 15 2017 libdc1394.so -> libdc1394.so.22.2.1 lrwxrwxrwx 1 root root 19 Jan 15 2017 libdc1394.so.22 -> libdc1394.so.22.2.1 -rw-r--r-- 1 root root 216K Jan 15 2017 libdc1394.so.22.2.1 lrwxrwxrwx 1 root root 17 Mar 29 2018 libdconf.so.1 -> libdconf.so.1.0.0 -rw-r--r-- 1 root root 56K Mar 29 2018 libdconf.so.1.0.0 lrwxrwxrwx 1 root root 25 Jun 8 2016 libdebconfclient.so.0 -> 
libdebconfclient.so.0.0.0 -rw-r--r-- 1 root root 11K Jun 8 2016 libdebconfclient.so.0.0.0 -rw-r--r-- 1 root root 12K May 3 10:19 libdl.a lrwxrwxrwx 1 root root 32 May 3 10:19 libdl.so -> /lib/x86_64-linux-gnu/libdl.so.2 lrwxrwxrwx 1 root root 15 May 11 2020 libdrm.so.2 -> libdrm.so.2.4.0 -rw-r--r-- 1 root root 67K May 11 2020 libdrm.so.2.4.0 lrwxrwxrwx 1 root root 22 May 11 2020 libdrm_amdgpu.so.1 -> libdrm_amdgpu.so.1.0.0 -rw-r--r-- 1 root root 39K May 11 2020 libdrm_amdgpu.so.1.0.0 lrwxrwxrwx 1 root root 21 May 11 2020 libdrm_intel.so.1 -> libdrm_intel.so.1.0.0 -rw-r--r-- 1 root root 140K May 11 2020 libdrm_intel.so.1.0.0 lrwxrwxrwx 1 root root 23 May 11 2020 libdrm_nouveau.so.2 -> libdrm_nouveau.so.2.0.0 -rw-r--r-- 1 root root 31K May 11 2020 libdrm_nouveau.so.2.0.0 lrwxrwxrwx 1 root root 22 May 11 2020 libdrm_radeon.so.1 -> libdrm_radeon.so.1.0.1 -rw-r--r-- 1 root root 47K May 11 2020 libdrm_radeon.so.1.0.1 lrwxrwxrwx 1 root root 17 Jun 19 2017 libedit.so.2 -> libedit.so.2.0.56 -rw-r--r-- 1 root root 204K Jun 19 2017 libedit.so.2.0.56 -rw-r--r-- 1 root root 103K Jun 7 2019 libelf-0.170.so lrwxrwxrwx 1 root root 15 Jun 7 2019 libelf.so.1 -> libelf-0.170.so lrwxrwxrwx 1 root root 17 Nov 13 2017 libepoxy.so.0 -> libepoxy.so.0.0.0 -rw-r--r-- 1 root root 1.1M Nov 13 2017 libepoxy.so.0.0.0 lrwxrwxrwx 1 root root 19 Apr 27 2016 libepsilon.so.1 -> libepsilon.so.1.0.0 -rw-r--r-- 1 root root 97K Apr 27 2016 libepsilon.so.1.0.0 -rw-r--r-- 1 root root 400K Nov 6 2020 libexif.a lrwxrwxrwx 1 root root 17 Nov 6 2020 libexif.so -> libexif.so.12.3.3 lrwxrwxrwx 1 root root 17 Nov 6 2020 libexif.so.12 -> libexif.so.12.3.3 -rw-r--r-- 1 root root 274K Nov 6 2020 libexif.so.12.3.3 lrwxrwxrwx 1 root root 18 Mar 8 2022 libexpatw.so.1 -> libexpatw.so.1.6.7 -rw-r--r-- 1 root root 203K Mar 8 2022 libexpatw.so.1.6.7 lrwxrwxrwx 1 root root 18 Jan 4 2018 libfabric.so.1 -> libfabric.so.1.9.3 -rw-r--r-- 1 root root 676K Jan 4 2018 libfabric.so.1.9.3 lrwxrwxrwx 1 root root 15 Jan 7 2018 libffi.so.6 -> libffi.so.6.0.4 -rw-r--r-- 1 root root 31K Jan 7 2018 libffi.so.6.0.4 lrwxrwxrwx 1 root root 23 Apr 5 2018 libfontconfig.so.1 -> libfontconfig.so.1.10.1 -rw-r--r-- 1 root root 276K Apr 5 2018 libfontconfig.so.1.10.1 lrwxrwxrwx 1 root root 14 May 23 2018 libform.so.5 -> libform.so.5.9 -rw-r--r-- 1 root root 60K May 23 2018 libform.so.5.9 lrwxrwxrwx 1 root root 15 May 23 2018 libformw.so.5 -> libformw.so.5.9 -rw-r--r-- 1 root root 68K May 23 2018 libformw.so.5.9 lrwxrwxrwx 1 root root 18 Jul 6 11:25 libfreebl3.chk -> nss/libfreebl3.chk lrwxrwxrwx 1 root root 17 Jul 6 11:25 libfreebl3.so -> nss/libfreebl3.so lrwxrwxrwx 1 root root 22 Jul 6 11:25 libfreeblpriv3.chk -> nss/libfreeblpriv3.chk lrwxrwxrwx 1 root root 21 Jul 6 11:25 libfreeblpriv3.so -> nss/libfreeblpriv3.so lrwxrwxrwx 1 root root 21 Jul 19 16:39 libfreetype.so.6 -> libfreetype.so.6.15.0 -rw-r--r-- 1 root root 719K Jul 19 16:39 libfreetype.so.6.15.0 lrwxrwxrwx 1 root root 18 Feb 22 2018 libfreexl.so.1 -> libfreexl.so.1.1.0 -rw-r--r-- 1 root root 34K Feb 22 2018 libfreexl.so.1.1.0 lrwxrwxrwx 1 root root 16 Aug 19 2016 libfyba.so.0 -> libfyba.so.0.0.0 -rw-r--r-- 1 root root 215K Aug 19 2016 libfyba.so.0.0.0 lrwxrwxrwx 1 root root 16 Aug 19 2016 libfygm.so.0 -> libfygm.so.0.0.0 -rw-r--r-- 1 root root 27K Aug 19 2016 libfygm.so.0.0.0 lrwxrwxrwx 1 root root 16 Aug 19 2016 libfyut.so.0 -> libfyut.so.0.0.0 -rw-r--r-- 1 root root 35K Aug 19 2016 libfyut.so.0.0.0 -rw-r--r-- 1 root root 1.2K May 3 10:19 libg.a lrwxrwxrwx 1 root root 14 Aug 30 2021 libgd.so.3 -> 
libgd.so.3.0.5 -rw-r--r-- 1 root root 397K Aug 30 2021 libgd.so.3.0.5 lrwxrwxrwx 1 root root 16 Mar 14 2018 libgdbm.so.5 -> libgdbm.so.5.0.0 -rw-r--r-- 1 root root 51K Mar 14 2018 libgdbm.so.5.0.0 lrwxrwxrwx 1 root root 23 Mar 14 2018 libgdbm_compat.so.4 -> libgdbm_compat.so.4.0.0 -rw-r--r-- 1 root root 14K Mar 14 2018 libgdbm_compat.so.4.0.0 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmCommon.so -> libgdcmCommon.so.2.8 lrwxrwxrwx 1 root root 22 Feb 5 2018 libgdcmCommon.so.2.8 -> libgdcmCommon.so.2.8.4 -rw-r--r-- 1 root root 155K Feb 5 2018 libgdcmCommon.so.2.8.4 lrwxrwxrwx 1 root root 18 Feb 5 2018 libgdcmDICT.so -> libgdcmDICT.so.2.8 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmDICT.so.2.8 -> libgdcmDICT.so.2.8.4 -rw-r--r-- 1 root root 2.4M Feb 5 2018 libgdcmDICT.so.2.8.4 lrwxrwxrwx 1 root root 18 Feb 5 2018 libgdcmDSED.so -> libgdcmDSED.so.2.8 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmDSED.so.2.8 -> libgdcmDSED.so.2.8.4 -rw-r--r-- 1 root root 1.1M Feb 5 2018 libgdcmDSED.so.2.8.4 lrwxrwxrwx 1 root root 17 Feb 5 2018 libgdcmIOD.so -> libgdcmIOD.so.2.8 lrwxrwxrwx 1 root root 19 Feb 5 2018 libgdcmIOD.so.2.8 -> libgdcmIOD.so.2.8.4 -rw-r--r-- 1 root root 87K Feb 5 2018 libgdcmIOD.so.2.8.4 lrwxrwxrwx 1 root root 18 Feb 5 2018 libgdcmMEXD.so -> libgdcmMEXD.so.2.8 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmMEXD.so.2.8 -> libgdcmMEXD.so.2.8.4 -rw-r--r-- 1 root root 1.1M Feb 5 2018 libgdcmMEXD.so.2.8.4 lrwxrwxrwx 1 root root 18 Feb 5 2018 libgdcmMSFF.so -> libgdcmMSFF.so.2.8 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmMSFF.so.2.8 -> libgdcmMSFF.so.2.8.4 -rw-r--r-- 1 root root 2.6M Feb 5 2018 libgdcmMSFF.so.2.8.4 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmjpeg12.so -> libgdcmjpeg12.so.2.8 lrwxrwxrwx 1 root root 22 Feb 5 2018 libgdcmjpeg12.so.2.8 -> libgdcmjpeg12.so.2.8.4 -rw-r--r-- 1 root root 158K Feb 5 2018 libgdcmjpeg12.so.2.8.4 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmjpeg16.so -> libgdcmjpeg16.so.2.8 lrwxrwxrwx 1 root root 22 Feb 5 2018 libgdcmjpeg16.so.2.8 -> libgdcmjpeg16.so.2.8.4 -rw-r--r-- 1 root root 158K Feb 5 2018 libgdcmjpeg16.so.2.8.4 lrwxrwxrwx 1 root root 19 Feb 5 2018 libgdcmjpeg8.so -> libgdcmjpeg8.so.2.8 lrwxrwxrwx 1 root root 21 Feb 5 2018 libgdcmjpeg8.so.2.8 -> libgdcmjpeg8.so.2.8.4 -rw-r--r-- 1 root root 158K Feb 5 2018 libgdcmjpeg8.so.2.8.4 lrwxrwxrwx 1 root root 21 Jun 5 2019 libgdk-3.so.0 -> libgdk-3.so.0.2200.30 -rw-r--r-- 1 root root 981K Jun 5 2019 libgdk-3.so.0.2200.30 lrwxrwxrwx 1 root root 27 Mar 24 2018 libgdk-x11-2.0.so.0 -> libgdk-x11-2.0.so.0.2400.32 -rw-r--r-- 1 root root 724K Mar 24 2018 libgdk-x11-2.0.so.0.2400.32 lrwxrwxrwx 1 root root 29 Mar 16 2018 libgdk_pixbuf-2.0.so.0 -> libgdk_pixbuf-2.0.so.0.3611.0 -rw-r--r-- 1 root root 144K Mar 16 2018 libgdk_pixbuf-2.0.so.0.3611.0 lrwxrwxrwx 1 root root 34 Mar 16 2018 libgdk_pixbuf_xlib-2.0.so.0 -> libgdk_pixbuf_xlib-2.0.so.0.3611.0 -rw-r--r-- 1 root root 67K Mar 16 2018 libgdk_pixbuf_xlib-2.0.so.0.3611.0 -rw-r--r-- 1 root root 1.6M Mar 1 2018 libgeos-3.6.2.so lrwxrwxrwx 1 root root 19 Mar 1 2018 libgeos_c.so.1 -> libgeos_c.so.1.10.2 -rw-r--r-- 1 root root 187K Mar 1 2018 libgeos_c.so.1.10.2 lrwxrwxrwx 1 root root 19 Nov 17 2016 libgeotiff.so.2 -> libgeotiff.so.2.1.2 -rw-r--r-- 1 root root 215K Nov 17 2016 libgeotiff.so.2.1.2 lrwxrwxrwx 1 root root 20 Dec 4 2019 libgfortran.so.4 -> libgfortran.so.4.0.0 -rw-r--r-- 1 root root 1.9M Dec 4 2019 libgfortran.so.4.0.0 lrwxrwxrwx 1 root root 15 Aug 19 2019 libgif.so.7 -> libgif.so.7.0.0 -rw-r--r-- 1 root root 34K Aug 19 2019 libgif.so.7.0.0 -rw-r--r-- 1 root root 4.0M 
Nov 29 2021 libgio-2.0.a lrwxrwxrwx 1 root root 22 Nov 29 2021 libgio-2.0.so -> libgio-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 22 Nov 29 2021 libgio-2.0.so.0 -> libgio-2.0.so.0.5600.4 -rw-r--r-- 1 root root 1.7M Nov 29 2021 libgio-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 28 Apr 15 2018 libgirepository-1.0.so.1 -> libgirepository-1.0.so.1.0.0 -rw-r--r-- 1 root root 208K Apr 15 2018 libgirepository-1.0.so.1.0.0 lrwxrwxrwx 1 root root 17 Sep 27 2017 libgl2ps.so.1.4 -> libgl2ps.so.1.4.0 -rw-r--r-- 1 root root 79K Sep 27 2017 libgl2ps.so.1.4.0 lrwxrwxrwx 1 root root 17 Jun 12 2020 libglapi.so.0 -> libglapi.so.0.0.0 -rw-r--r-- 1 root root 215K Jun 12 2020 libglapi.so.0.0.0 -rw-r--r-- 1 root root 2.0M Nov 29 2021 libglib-2.0.a lrwxrwxrwx 1 root root 23 Nov 29 2021 libglib-2.0.so -> libglib-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 23 Nov 29 2021 libglib-2.0.so.0 -> libglib-2.0.so.0.5600.4 -rw-r--r-- 1 root root 1.1M Nov 29 2021 libglib-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 15 Dec 7 2017 libgme.so.0 -> libgme.so.0.6.2 -rw-r--r-- 1 root root 303K Dec 7 2017 libgme.so.0.6.2 -rw-r--r-- 1 root root 16K Nov 29 2021 libgmodule-2.0.a lrwxrwxrwx 1 root root 26 Nov 29 2021 libgmodule-2.0.so -> libgmodule-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 26 Nov 29 2021 libgmodule-2.0.so.0 -> libgmodule-2.0.so.0.5600.4 -rw-r--r-- 1 root root 14K Nov 29 2021 libgmodule-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 16 Jan 24 2018 libgmp.so.10 -> libgmp.so.10.3.2 -rw-r--r-- 1 root root 515K Jan 24 2018 libgmp.so.10.3.2 lrwxrwxrwx 1 root root 21 Aug 25 2021 libgnutls.so.30 -> libgnutls.so.30.14.10 -rw-r--r-- 1 root root 1.4M Aug 25 2021 libgnutls.so.30.14.10 -rw-r--r-- 1 root root 667K Nov 29 2021 libgobject-2.0.a lrwxrwxrwx 1 root root 26 Nov 29 2021 libgobject-2.0.so -> libgobject-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 26 Nov 29 2021 libgobject-2.0.so.0 -> libgobject-2.0.so.0.5600.4 -rw-r--r-- 1 root root 335K Nov 29 2021 libgobject-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 16 Mar 10 2020 libgomp.so.1 -> libgomp.so.1.0.0 -rw-r--r-- 1 root root 188K Mar 10 2020 libgomp.so.1.0.0 drwxr-xr-x 3 root root 4.0K Aug 16 08:41 libgphoto2 drwxr-xr-x 3 root root 4.0K Aug 16 08:41 libgphoto2-dev -rw-r--r-- 1 root root 262K Dec 23 2017 libgphoto2.a lrwxrwxrwx 1 root root 19 Dec 23 2017 libgphoto2.so -> libgphoto2.so.6.0.0 lrwxrwxrwx 1 root root 19 Dec 23 2017 libgphoto2.so.6 -> libgphoto2.so.6.0.0 -rw-r--r-- 1 root root 143K Dec 23 2017 libgphoto2.so.6.0.0 drwxr-xr-x 3 root root 4.0K Aug 16 08:41 libgphoto2_port -rw-r--r-- 1 root root 79K Dec 23 2017 libgphoto2_port.a lrwxrwxrwx 1 root root 25 Dec 23 2017 libgphoto2_port.so -> libgphoto2_port.so.12.0.0 lrwxrwxrwx 1 root root 25 Dec 23 2017 libgphoto2_port.so.12 -> libgphoto2_port.so.12.0.0 -rw-r--r-- 1 root root 43K Dec 23 2017 libgphoto2_port.so.12.0.0 lrwxrwxrwx 1 root root 17 Mar 11 2018 libgraphite2.so -> libgraphite2.so.3 lrwxrwxrwx 1 root root 17 Mar 11 2018 libgraphite2.so.2.0.0 -> libgraphite2.so.3 lrwxrwxrwx 1 root root 21 Mar 11 2018 libgraphite2.so.3 -> libgraphite2.so.3.0.1 -rw-r--r-- 1 root root 179K Mar 11 2018 libgraphite2.so.3.0.1 lrwxrwxrwx 1 root root 16 Apr 3 2018 libgsm.so.1 -> libgsm.so.1.0.12 -rw-r--r-- 1 root root 51K Apr 3 2018 libgsm.so.1.0.12 lrwxrwxrwx 1 root root 18 Dec 15 2017 libgssapi.so.3 -> libgssapi.so.3.0.0 -rw-r--r-- 1 root root 260K Dec 15 2017 libgssapi.so.3.0.0 lrwxrwxrwx 1 root root 21 Nov 11 2020 libgssapi_krb5.so.2 -> libgssapi_krb5.so.2.2 -rw-r--r-- 1 root root 299K Nov 11 2020 libgssapi_krb5.so.2.2 -rw-r--r-- 1 root root 2.7K Nov 29 2021 libgthread-2.0.a 
lrwxrwxrwx 1 root root 26 Nov 29 2021 libgthread-2.0.so -> libgthread-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 26 Nov 29 2021 libgthread-2.0.so.0 -> libgthread-2.0.so.0.5600.4 -rw-r--r-- 1 root root 5.9K Nov 29 2021 libgthread-2.0.so.0.5600.4 drwxr-xr-x 2 root root 4.0K Aug 16 08:41 libgtk-3-0 lrwxrwxrwx 1 root root 21 Jun 5 2019 libgtk-3.so.0 -> libgtk-3.so.0.2200.30 -rw-r--r-- 1 root root 7.1M Jun 5 2019 libgtk-3.so.0.2200.30 lrwxrwxrwx 1 root root 27 Mar 24 2018 libgtk-x11-2.0.so.0 -> libgtk-x11-2.0.so.0.2400.32 -rw-r--r-- 1 root root 4.3M Mar 24 2018 libgtk-x11-2.0.so.0.2400.32 drwxr-xr-x 2 root root 4.0K Aug 16 08:41 libgtk2.0-0 lrwxrwxrwx 1 root root 19 Feb 3 2017 libgts-0.7.so.5 -> libgts-0.7.so.5.0.1 -rw-r--r-- 1 root root 371K Feb 3 2017 libgts-0.7.so.5.0.1 lrwxrwxrwx 1 root root 15 Mar 24 2018 libgvc.so -> libgvc.so.6.0.0 lrwxrwxrwx 1 root root 15 Mar 24 2018 libgvc.so.6 -> libgvc.so.6.0.0 -rw-r--r-- 1 root root 613K Mar 24 2018 libgvc.so.6.0.0 lrwxrwxrwx 1 root root 16 Mar 24 2018 libgvpr.so -> libgvpr.so.2.0.0 lrwxrwxrwx 1 root root 16 Mar 24 2018 libgvpr.so.2 -> libgvpr.so.2.0.0 -rw-r--r-- 1 root root 480K Mar 24 2018 libgvpr.so.2.0.0 -rw-r--r-- 1 root root 67K Apr 13 2018 libharfbuzz-gobject.a lrwxrwxrwx 1 root root 32 Apr 13 2018 libharfbuzz-gobject.so -> libharfbuzz-gobject.so.0.10702.0 lrwxrwxrwx 1 root root 32 Apr 13 2018 libharfbuzz-gobject.so.0 -> libharfbuzz-gobject.so.0.10702.0 -rw-r--r-- 1 root root 55K Apr 13 2018 libharfbuzz-gobject.so.0.10702.0 -rw-r--r-- 1 root root 7.0K Apr 13 2018 libharfbuzz-icu.a lrwxrwxrwx 1 root root 28 Apr 13 2018 libharfbuzz-icu.so -> libharfbuzz-icu.so.0.10702.0 lrwxrwxrwx 1 root root 28 Apr 13 2018 libharfbuzz-icu.so.0 -> libharfbuzz-icu.so.0.10702.0 -rw-r--r-- 1 root root 11K Apr 13 2018 libharfbuzz-icu.so.0.10702.0 -rw-r--r-- 1 root root 965K Apr 13 2018 libharfbuzz.a lrwxrwxrwx 1 root root 24 Apr 13 2018 libharfbuzz.so -> libharfbuzz.so.0.10702.0 lrwxrwxrwx 1 root root 24 Apr 13 2018 libharfbuzz.so.0 -> libharfbuzz.so.0.10702.0 -rw-r--r-- 1 root root 631K Apr 13 2018 libharfbuzz.so.0.10702.0 lrwxrwxrwx 1 root root 19 Dec 15 2017 libhcrypto.so.4 -> libhcrypto.so.4.1.0 -rw-r--r-- 1 root root 213K Dec 15 2017 libhcrypto.so.4.1.0 lrwxrwxrwx 1 root root 26 Aug 13 2017 libhdf5_openmpi.so.100 -> libhdf5_openmpi.so.100.0.1 -rw-r--r-- 1 root root 3.5M Aug 13 2017 libhdf5_openmpi.so.100.0.1 lrwxrwxrwx 1 root root 34 Aug 13 2017 libhdf5_openmpi_fortran.so.100 -> libhdf5_openmpi_fortran.so.100.0.1 -rw-r--r-- 1 root root 249K Aug 13 2017 libhdf5_openmpi_fortran.so.100.0.1 lrwxrwxrwx 1 root root 29 Aug 13 2017 libhdf5_openmpi_hl.so.100 -> libhdf5_openmpi_hl.so.100.0.0 -rw-r--r-- 1 root root 136K Aug 13 2017 libhdf5_openmpi_hl.so.100.0.0 lrwxrwxrwx 1 root root 36 Aug 13 2017 libhdf5_openmpihl_fortran.so.100 -> libhdf5_openmpihl_fortran.so.100.0.0 -rw-r--r-- 1 root root 119K Aug 13 2017 libhdf5_openmpihl_fortran.so.100.0.0 lrwxrwxrwx 1 root root 25 Aug 13 2017 libhdf5_serial.so.100 -> libhdf5_serial.so.100.0.1 -rw-r--r-- 1 root root 3.4M Aug 13 2017 libhdf5_serial.so.100.0.1 lrwxrwxrwx 1 root root 33 Aug 13 2017 libhdf5_serial_fortran.so.100 -> libhdf5_serial_fortran.so.100.0.1 -rw-r--r-- 1 root root 245K Aug 13 2017 libhdf5_serial_fortran.so.100.0.1 lrwxrwxrwx 1 root root 28 Aug 13 2017 libhdf5_serial_hl.so.100 -> libhdf5_serial_hl.so.100.0.0 -rw-r--r-- 1 root root 136K Aug 13 2017 libhdf5_serial_hl.so.100.0.0 lrwxrwxrwx 1 root root 35 Aug 13 2017 libhdf5_serialhl_fortran.so.100 -> libhdf5_serialhl_fortran.so.100.0.0 -rw-r--r-- 1 root root 119K Aug 
13 2017 libhdf5_serialhl_fortran.so.100.0.0 lrwxrwxrwx 1 root root 20 Dec 15 2017 libheimbase.so.1 -> libheimbase.so.1.0.0 -rw-r--r-- 1 root root 59K Dec 15 2017 libheimbase.so.1.0.0 lrwxrwxrwx 1 root root 20 Dec 15 2017 libheimntlm.so.0 -> libheimntlm.so.0.1.0 -rw-r--r-- 1 root root 35K Dec 15 2017 libheimntlm.so.0.1.0 lrwxrwxrwx 1 root root 17 Jun 14 2021 libhogweed.so.4 -> libhogweed.so.4.5 -rw-r--r-- 1 root root 215K Jun 14 2021 libhogweed.so.4.5 lrwxrwxrwx 1 root root 13 Jan 19 2018 libhwloc.so.0 -> libhwloc.so.5 lrwxrwxrwx 1 root root 13 Jan 19 2018 libhwloc.so.1 -> libhwloc.so.5 lrwxrwxrwx 1 root root 13 Jan 19 2018 libhwloc.so.2 -> libhwloc.so.5 lrwxrwxrwx 1 root root 13 Jan 19 2018 libhwloc.so.3 -> libhwloc.so.5 lrwxrwxrwx 1 root root 13 Jan 19 2018 libhwloc.so.4 -> libhwloc.so.5 lrwxrwxrwx 1 root root 17 Jan 19 2018 libhwloc.so.5 -> libhwloc.so.5.7.6 -rw-r--r-- 1 root root 244K Jan 19 2018 libhwloc.so.5.7.6 lrwxrwxrwx 1 root root 17 Dec 15 2017 libhx509.so.5 -> libhx509.so.5.0.0 -rw-r--r-- 1 root root 294K Dec 15 2017 libhx509.so.5.0.0 lrwxrwxrwx 1 root root 22 Aug 5 2019 libibverbs.so.1 -> libibverbs.so.1.1.17.1 -rw-r--r-- 1 root root 87K Aug 5 2019 libibverbs.so.1.1.17.1 -rw-r--r-- 1 root root 55K Dec 9 2017 libicu-le-hb.a lrwxrwxrwx 1 root root 21 Dec 9 2017 libicu-le-hb.so -> libicu-le-hb.so.0.0.0 lrwxrwxrwx 1 root root 21 Dec 9 2017 libicu-le-hb.so.0 -> libicu-le-hb.so.0.0.0 -rw-r--r-- 1 root root 35K Dec 9 2017 libicu-le-hb.so.0.0.0 -rw-r--r-- 1 root root 26M Oct 19 2021 libicudata.a lrwxrwxrwx 1 root root 18 Oct 19 2021 libicudata.so -> libicudata.so.60.2 lrwxrwxrwx 1 root root 18 Oct 19 2021 libicudata.so.60 -> libicudata.so.60.2 -rw-r--r-- 1 root root 26M Oct 19 2021 libicudata.so.60.2 -rw-r--r-- 1 root root 5.9M Oct 19 2021 libicui18n.a lrwxrwxrwx 1 root root 18 Oct 19 2021 libicui18n.so -> libicui18n.so.60.2 lrwxrwxrwx 1 root root 18 Oct 19 2021 libicui18n.so.60 -> libicui18n.so.60.2 -rw-r--r-- 1 root root 2.7M Oct 19 2021 libicui18n.so.60.2 -rw-r--r-- 1 root root 85K Oct 19 2021 libicuio.a lrwxrwxrwx 1 root root 16 Oct 19 2021 libicuio.so -> libicuio.so.60.2 lrwxrwxrwx 1 root root 16 Oct 19 2021 libicuio.so.60 -> libicuio.so.60.2 -rw-r--r-- 1 root root 55K Oct 19 2021 libicuio.so.60.2 -rw-r--r-- 1 root root 75K Oct 19 2021 libiculx.a lrwxrwxrwx 1 root root 16 Oct 19 2021 libiculx.so -> libiculx.so.60.2 lrwxrwxrwx 1 root root 16 Oct 19 2021 libiculx.so.60 -> libiculx.so.60.2 -rw-r--r-- 1 root root 47K Oct 19 2021 libiculx.so.60.2 -rw-r--r-- 1 root root 109K Oct 19 2021 libicutest.a lrwxrwxrwx 1 root root 18 Oct 19 2021 libicutest.so -> libicutest.so.60.2 lrwxrwxrwx 1 root root 18 Oct 19 2021 libicutest.so.60 -> libicutest.so.60.2 -rw-r--r-- 1 root root 64K Oct 19 2021 libicutest.so.60.2 -rw-r--r-- 1 root root 359K Oct 19 2021 libicutu.a lrwxrwxrwx 1 root root 16 Oct 19 2021 libicutu.so -> libicutu.so.60.2 lrwxrwxrwx 1 root root 16 Oct 19 2021 libicutu.so.60 -> libicutu.so.60.2 -rw-r--r-- 1 root root 199K Oct 19 2021 libicutu.so.60.2 -rw-r--r-- 1 root root 3.1M Oct 19 2021 libicuuc.a lrwxrwxrwx 1 root root 16 Oct 19 2021 libicuuc.so -> libicuuc.so.60.2 lrwxrwxrwx 1 root root 16 Oct 19 2021 libicuuc.so.60 -> libicuuc.so.60.2 -rw-r--r-- 1 root root 1.8M Oct 19 2021 libicuuc.so.60.2 lrwxrwxrwx 1 root root 16 Oct 24 2019 libidn2.so.0 -> libidn2.so.0.3.3 -rw-r--r-- 1 root root 114K Oct 24 2019 libidn2.so.0.3.3 lrwxrwxrwx 1 root root 20 Jan 14 2018 libinfinipath.so.4 -> libinfinipath.so.4.0 -rw-r--r-- 1 root root 59K Jan 14 2018 libinfinipath.so.4.0 lrwxrwxrwx 1 root root 16 
Mar 9 2018 libisl.so.19 -> libisl.so.19.0.0 -rw-r--r-- 1 root root 1.6M Mar 9 2018 libisl.so.19.0.0 lrwxrwxrwx 1 root root 15 Mar 10 2020 libitm.so.1 -> libitm.so.1.0.0 -rw-r--r-- 1 root root 111K Mar 10 2020 libitm.so.1.0.0 -rw-r--r-- 1 root root 51K Apr 3 2018 libjbig.a lrwxrwxrwx 1 root root 12 Apr 3 2018 libjbig.so -> libjbig.so.0 -rw-r--r-- 1 root root 57K Apr 3 2018 libjbig.so.0 -rw-r--r-- 1 root root 579K Jun 4 2020 libjpeg.a lrwxrwxrwx 1 root root 16 Jun 4 2020 libjpeg.so -> libjpeg.so.8.1.2 lrwxrwxrwx 1 root root 16 Jun 4 2020 libjpeg.so.8 -> libjpeg.so.8.1.2 -rw-r--r-- 1 root root 415K Jun 4 2020 libjpeg.so.8.1.2 lrwxrwxrwx 1 root root 27 Apr 6 2020 libjson-glib-1.0.so.0 -> libjson-glib-1.0.so.0.400.2 -rw-r--r-- 1 root root 154K Apr 6 2020 libjson-glib-1.0.so.0.400.2 lrwxrwxrwx 1 root root 19 Aug 23 2016 libjsoncpp.so.1 -> libjsoncpp.so.1.7.4 -rw-r--r-- 1 root root 199K Aug 23 2016 libjsoncpp.so.1.7.4 lrwxrwxrwx 1 root root 18 Nov 11 2020 libk5crypto.so.3 -> libk5crypto.so.3.1 -rw-r--r-- 1 root root 195K Nov 11 2020 libk5crypto.so.3.1 lrwxrwxrwx 1 root root 19 Sep 25 2017 libkmlbase.so.1 -> libkmlbase.so.1.3.0 -rw-r--r-- 1 root root 107K Sep 25 2017 libkmlbase.so.1.3.0 lrwxrwxrwx 1 root root 18 Sep 25 2017 libkmldom.so.1 -> libkmldom.so.1.3.0 -rw-r--r-- 1 root root 731K Sep 25 2017 libkmldom.so.1.3.0 lrwxrwxrwx 1 root root 21 Sep 25 2017 libkmlengine.so.1 -> libkmlengine.so.1.3.0 -rw-r--r-- 1 root root 223K Sep 25 2017 libkmlengine.so.1.3.0 lrwxrwxrwx 1 root root 17 Dec 15 2017 libkrb5.so.26 -> libkrb5.so.26.0.0 -rw-r--r-- 1 root root 561K Dec 15 2017 libkrb5.so.26.0.0 lrwxrwxrwx 1 root root 14 Nov 11 2020 libkrb5.so.3 -> libkrb5.so.3.3 -rw-r--r-- 1 root root 857K Nov 11 2020 libkrb5.so.3.3 lrwxrwxrwx 1 root root 21 Nov 11 2020 libkrb5support.so.0 -> libkrb5support.so.0.1 -rw-r--r-- 1 root root 43K Nov 11 2020 libkrb5support.so.0.1 lrwxrwxrwx 1 root root 21 Mar 24 2018 liblab_gamut.so -> liblab_gamut.so.1.0.0 lrwxrwxrwx 1 root root 21 Mar 24 2018 liblab_gamut.so.1 -> liblab_gamut.so.1.0.0 -rw-r--r-- 1 root root 2.4M Mar 24 2018 liblab_gamut.so.1.0.0 lrwxrwxrwx 1 root root 46 Aug 16 08:41 liblapack.a -> /etc/alternatives/liblapack.a-x86_64-linux-gnu lrwxrwxrwx 1 root root 47 Aug 16 08:41 liblapack.so -> /etc/alternatives/liblapack.so-x86_64-linux-gnu lrwxrwxrwx 1 root root 49 Aug 16 08:41 liblapack.so.3 -> /etc/alternatives/liblapack.so.3-x86_64-linux-gnu lrwxrwxrwx 1 root root 21 May 12 13:52 liblber-2.4.so.2 -> liblber-2.4.so.2.10.8 -rw-r--r-- 1 root root 55K May 12 13:52 liblber-2.4.so.2.10.8 lrwxrwxrwx 1 root root 17 Sep 20 2018 liblcms2.so.2 -> liblcms2.so.2.0.8 -rw-r--r-- 1 root root 349K Sep 20 2018 liblcms2.so.2.0.8 lrwxrwxrwx 1 root root 18 May 12 13:52 libldap-2.4.so.2 -> libldap_r-2.4.so.2 lrwxrwxrwx 1 root root 23 May 12 13:52 libldap_r-2.4.so.2 -> libldap_r-2.4.so.2.10.8 -rw-r--r-- 1 root root 320K May 12 13:52 libldap_r-2.4.so.2.10.8 lrwxrwxrwx 1 root root 16 Mar 2 2018 liblept.so.5 -> liblept.so.5.0.2 -rw-r--r-- 1 root root 2.4M Mar 2 2018 liblept.so.5.0.2 lrwxrwxrwx 1 root root 16 Mar 10 2020 liblsan.so.0 -> liblsan.so.0.0.0 -rw-r--r-- 1 root root 338K Mar 10 2020 liblsan.so.0.0.0 lrwxrwxrwx 1 root root 16 Aug 20 2016 libltdl.so.7 -> libltdl.so.7.3.1 -rw-r--r-- 1 root root 39K Aug 20 2016 libltdl.so.7.3.1 lrwxrwxrwx 1 root root 15 May 20 2021 liblz4.so.1 -> liblz4.so.1.7.1 -rw-r--r-- 1 root root 111K May 20 2021 liblz4.so.1.7.1 -rw-r--r-- 1 root root 261K Apr 8 12:56 liblzma.a lrwxrwxrwx 1 root root 38 Apr 8 12:56 liblzma.so -> 
/lib/x86_64-linux-gnu/liblzma.so.5.2.2 -rw-r--r-- 1 root root 3.5M May 3 10:19 libm-2.27.a -rw-r--r-- 1 root root 132 May 3 10:19 libm.a -rw-r--r-- 1 root root 186 May 3 10:19 libm.so lrwxrwxrwx 1 root root 34 Feb 5 2018 libmca_common_libfabric.so.20 -> libmca_common_libfabric.so.20.10.0 lrwxrwxrwx 1 root root 46 Feb 5 2018 libmca_common_libfabric.so.20.10.0 -> openmpi/lib/libmca_common_libfabric.so.20.10.0 lrwxrwxrwx 1 root root 27 Feb 5 2018 libmca_common_sm.so.20 -> libmca_common_sm.so.20.10.1 lrwxrwxrwx 1 root root 39 Feb 5 2018 libmca_common_sm.so.20.10.1 -> openmpi/lib/libmca_common_sm.so.20.10.1 lrwxrwxrwx 1 root root 30 Feb 5 2018 libmca_common_verbs.so.20 -> libmca_common_verbs.so.20.10.0 lrwxrwxrwx 1 root root 42 Feb 5 2018 libmca_common_verbs.so.20.10.0 -> openmpi/lib/libmca_common_verbs.so.20.10.0 -rw-r--r-- 1 root root 1.5K May 3 10:19 libmcheck.a lrwxrwxrwx 1 root root 14 May 23 2018 libmenu.so.5 -> libmenu.so.5.9 -rw-r--r-- 1 root root 35K May 23 2018 libmenu.so.5.9 lrwxrwxrwx 1 root root 15 May 23 2018 libmenuw.so.5 -> libmenuw.so.5.9 -rw-r--r-- 1 root root 35K May 23 2018 libmenuw.so.5.9 lrwxrwxrwx 1 root root 19 Apr 3 2018 libminizip.so.1 -> libminizip.so.1.0.0 -rw-r--r-- 1 root root 43K Apr 3 2018 libminizip.so.1.0.0 lrwxrwxrwx 1 root root 19 Oct 21 2017 libmp3lame.so.0 -> libmp3lame.so.0.0.0 -rw-r--r-- 1 root root 294K Oct 21 2017 libmp3lame.so.0.0.0 lrwxrwxrwx 1 root root 15 Jan 23 2018 libmpc.so.3 -> libmpc.so.3.1.0 -rw-r--r-- 1 root root 96K Jan 23 2018 libmpc.so.3.1.0 lrwxrwxrwx 1 root root 17 Apr 23 2018 libmpdec.so.2 -> libmpdec.so.2.4.2 -rw-r--r-- 1 root root 223K Apr 23 2018 libmpdec.so.2.4.2 lrwxrwxrwx 1 root root 16 Feb 7 2018 libmpfr.so.6 -> libmpfr.so.6.0.1 -rw-r--r-- 1 root root 512K Feb 7 2018 libmpfr.so.6.0.1 lrwxrwxrwx 1 root root 19 Mar 10 2018 libmpg123.so.0 -> libmpg123.so.0.44.8 -rw-r--r-- 1 root root 316K Mar 10 2018 libmpg123.so.0.44.8 lrwxrwxrwx 1 root root 17 Feb 5 2018 libmpi.so.20 -> libmpi.so.20.10.1 lrwxrwxrwx 1 root root 29 Feb 5 2018 libmpi.so.20.10.1 -> openmpi/lib/libmpi.so.20.10.1 lrwxrwxrwx 1 root root 21 Feb 5 2018 libmpi_cxx.so.20 -> libmpi_cxx.so.20.10.0 lrwxrwxrwx 1 root root 33 Feb 5 2018 libmpi_cxx.so.20.10.0 -> openmpi/lib/libmpi_cxx.so.20.10.0 lrwxrwxrwx 1 root root 22 Feb 5 2018 libmpi_java.so.20 -> libmpi_java.so.20.10.0 lrwxrwxrwx 1 root root 34 Feb 5 2018 libmpi_java.so.20.10.0 -> openmpi/lib/libmpi_java.so.20.10.0 lrwxrwxrwx 1 root root 23 Feb 5 2018 libmpi_mpifh.so.20 -> libmpi_mpifh.so.20.11.0 lrwxrwxrwx 1 root root 35 Feb 5 2018 libmpi_mpifh.so.20.11.0 -> openmpi/lib/libmpi_mpifh.so.20.11.0 lrwxrwxrwx 1 root root 35 Feb 5 2018 libmpi_usempi_ignore_tkr.so.20 -> libmpi_usempi_ignore_tkr.so.20.10.0 lrwxrwxrwx 1 root root 47 Feb 5 2018 libmpi_usempi_ignore_tkr.so.20.10.0 -> openmpi/lib/libmpi_usempi_ignore_tkr.so.20.10.0 lrwxrwxrwx 1 root root 27 Feb 5 2018 libmpi_usempif08.so.20 -> libmpi_usempif08.so.20.10.0 lrwxrwxrwx 1 root root 39 Feb 5 2018 libmpi_usempif08.so.20.10.0 -> openmpi/lib/libmpi_usempif08.so.20.10.0 lrwxrwxrwx 1 root root 15 Mar 10 2020 libmpx.so.2 -> libmpx.so.2.0.1 -rw-r--r-- 1 root root 19K Mar 10 2020 libmpx.so.2.0.1 lrwxrwxrwx 1 root root 23 Mar 10 2020 libmpxwrappers.so.2 -> libmpxwrappers.so.2.0.1 -rw-r--r-- 1 root root 15K Mar 10 2020 libmpxwrappers.so.2.0.1 -rw-r--r-- 1 root root 337K May 3 10:19 libmvec.a lrwxrwxrwx 1 root root 34 May 3 10:19 libmvec.so -> /lib/x86_64-linux-gnu/libmvec.so.1 -rw-r--r-- 1 root root 5.9K May 3 10:19 libmvec_nonshared.a lrwxrwxrwx 1 root root 25 Jul 26 11:28 
libmysqlclient.so.20 -> libmysqlclient.so.20.3.26 -rw-r--r-- 1 root root 3.8M Jul 26 11:28 libmysqlclient.so.20.3.26 -rw-r--r-- 1 root root 1.1M Feb 9 2018 libnetcdf.so.13 lrwxrwxrwx 1 root root 22 Nov 9 2017 libnetcdf_c++.so.4 -> libnetcdf_c++.so.4.2.0 -rw-r--r-- 1 root root 119K Nov 9 2017 libnetcdf_c++.so.4.2.0 lrwxrwxrwx 1 root root 16 Jun 14 2021 libnettle.so.6 -> libnettle.so.6.5 -rw-r--r-- 1 root root 215K Jun 14 2021 libnettle.so.6.5 lrwxrwxrwx 1 root root 21 Apr 10 2018 libnghttp2.so.14 -> libnghttp2.so.14.15.2 -rw-r--r-- 1 root root 150K Apr 10 2018 libnghttp2.so.14.15.2 lrwxrwxrwx 1 root root 25 Jun 2 2017 libnl-route-3.so.200 -> libnl-route-3.so.200.24.0 -rw-r--r-- 1 root root 458K Jun 2 2017 libnl-route-3.so.200.24.0 -rw-r--r-- 1 root root 201K May 3 10:19 libnsl.a lrwxrwxrwx 1 root root 33 May 3 10:19 libnsl.so -> /lib/x86_64-linux-gnu/libnsl.so.1 -rw-r--r-- 1 root root 234K Feb 22 2018 libnspr4.so -rw-r--r-- 1 root root 1.3M Jul 6 11:25 libnss3.so lrwxrwxrwx 1 root root 40 May 3 10:19 libnss_compat.so -> /lib/x86_64-linux-gnu/libnss_compat.so.2 lrwxrwxrwx 1 root root 37 May 3 10:19 libnss_dns.so -> /lib/x86_64-linux-gnu/libnss_dns.so.2 lrwxrwxrwx 1 root root 39 May 3 10:19 libnss_files.so -> /lib/x86_64-linux-gnu/libnss_files.so.2 lrwxrwxrwx 1 root root 40 May 3 10:19 libnss_hesiod.so -> /lib/x86_64-linux-gnu/libnss_hesiod.so.2 lrwxrwxrwx 1 root root 37 May 3 10:19 libnss_nis.so -> /lib/x86_64-linux-gnu/libnss_nis.so.2 lrwxrwxrwx 1 root root 41 May 3 10:19 libnss_nisplus.so -> /lib/x86_64-linux-gnu/libnss_nisplus.so.2 -rw-r--r-- 1 root root 187K Jul 6 11:25 libnssutil3.so lrwxrwxrwx 1 root root 16 Jun 20 2018 libnuma.so.1 -> libnuma.so.1.0.0 -rw-r--r-- 1 root root 43K Jun 20 2018 libnuma.so.1.0.0 lrwxrwxrwx 1 root root 12 Mar 26 2018 libodbc.so.1 -> libodbc.so.2 lrwxrwxrwx 1 root root 16 Mar 26 2018 libodbc.so.2 -> libodbc.so.2.0.0 -rw-r--r-- 1 root root 422K Mar 26 2018 libodbc.so.2.0.0 lrwxrwxrwx 1 root root 14 Mar 26 2018 libodbccr.so.1 -> libodbccr.so.2 lrwxrwxrwx 1 root root 18 Mar 26 2018 libodbccr.so.2 -> libodbccr.so.2.0.0 -rw-r--r-- 1 root root 43K Mar 26 2018 libodbccr.so.2.0.0 lrwxrwxrwx 1 root root 16 Mar 26 2018 libodbcinst.so.1 -> libodbcinst.so.2 lrwxrwxrwx 1 root root 20 Mar 26 2018 libodbcinst.so.2 -> libodbcinst.so.2.0.0 -rw-r--r-- 1 root root 71K Mar 26 2018 libodbcinst.so.2.0.0 lrwxrwxrwx 1 root root 15 Jun 2 2014 libogg.so.0 -> libogg.so.0.8.2 -rw-r--r-- 1 root root 34K Jun 2 2014 libogg.so.0.8.2 lrwxrwxrwx 1 root root 23 Feb 5 2018 libompitrace.so.20 -> libompitrace.so.20.10.0 lrwxrwxrwx 1 root root 35 Feb 5 2018 libompitrace.so.20.10.0 -> openmpi/lib/libompitrace.so.20.10.0 -rw-r--r-- 1 root root 1.7M Oct 20 2021 libopcodes-2.30-system.so lrwxrwxrwx 1 root root 22 Feb 5 2018 libopen-pal.so.20 -> libopen-pal.so.20.10.1 lrwxrwxrwx 1 root root 34 Feb 5 2018 libopen-pal.so.20.10.1 -> openmpi/lib/libopen-pal.so.20.10.1 lrwxrwxrwx 1 root root 22 Feb 5 2018 libopen-rte.so.20 -> libopen-rte.so.20.10.1 lrwxrwxrwx 1 root root 34 Feb 5 2018 libopen-rte.so.20.10.1 -> openmpi/lib/libopen-rte.so.20.10.1 lrwxrwxrwx 1 root root 22 Sep 19 2017 libopenblas.a -> libopenblasp-r0.2.20.a lrwxrwxrwx 1 root root 23 Sep 19 2017 libopenblas.so -> libopenblasp-r0.2.20.so lrwxrwxrwx 1 root root 23 Sep 19 2017 libopenblas.so.0 -> libopenblasp-r0.2.20.so -rw-r--r-- 1 root root 51M Sep 19 2017 libopenblasp-r0.2.20.a -rw-r--r-- 1 root root 33M Sep 19 2017 libopenblasp-r0.2.20.so -rw-r--r-- 1 root root 483K Sep 20 2018 libopencv_aruco.a lrwxrwxrwx 1 root root 22 Sep 20 2018 
libopencv_aruco.so -> libopencv_aruco.so.3.2 lrwxrwxrwx 1 root root 24 Sep 20 2018 libopencv_aruco.so.3.2 -> libopencv_aruco.so.3.2.0 -rw-r--r-- 1 root root 285K Sep 20 2018 libopencv_aruco.so.3.2.0 -rw-r--r-- 1 root root 116K Sep 20 2018 libopencv_bgsegm.a lrwxrwxrwx 1 root root 23 Sep 20 2018 libopencv_bgsegm.so -> libopencv_bgsegm.so.3.2 lrwxrwxrwx 1 root root 25 Sep 20 2018 libopencv_bgsegm.so.3.2 -> libopencv_bgsegm.so.3.2.0 -rw-r--r-- 1 root root 51K Sep 20 2018 libopencv_bgsegm.so.3.2.0 -rw-r--r-- 1 root root 449K Sep 20 2018 libopencv_bioinspired.a lrwxrwxrwx 1 root root 28 Sep 20 2018 libopencv_bioinspired.so -> libopencv_bioinspired.so.3.2 lrwxrwxrwx 1 root root 30 Sep 20 2018 libopencv_bioinspired.so.3.2 -> libopencv_bioinspired.so.3.2.0 -rw-r--r-- 1 root root 171K Sep 20 2018 libopencv_bioinspired.so.3.2.0 -rw-r--r-- 1 root root 2.3M Sep 20 2018 libopencv_calib3d.a lrwxrwxrwx 1 root root 24 Sep 20 2018 libopencv_calib3d.so -> libopencv_calib3d.so.3.2 lrwxrwxrwx 1 root root 26 Sep 20 2018 libopencv_calib3d.so.3.2 -> libopencv_calib3d.so.3.2.0 -rw-r--r-- 1 root root 1.4M Sep 20 2018 libopencv_calib3d.so.3.2.0 -rw-r--r-- 1 root root 633K Sep 20 2018 libopencv_ccalib.a lrwxrwxrwx 1 root root 23 Sep 20 2018 libopencv_ccalib.so -> libopencv_ccalib.so.3.2 lrwxrwxrwx 1 root root 25 Sep 20 2018 libopencv_ccalib.so.3.2 -> libopencv_ccalib.so.3.2.0 -rw-r--r-- 1 root root 363K Sep 20 2018 libopencv_ccalib.so.3.2.0 -rw-r--r-- 1 root root 4.6M Sep 20 2018 libopencv_core.a lrwxrwxrwx 1 root root 21 Sep 20 2018 libopencv_core.so -> libopencv_core.so.3.2 lrwxrwxrwx 1 root root 23 Sep 20 2018 libopencv_core.so.3.2 -> libopencv_core.so.3.2.0 -rw-r--r-- 1 root root 2.3M Sep 20 2018 libopencv_core.so.3.2.0 -rw-r--r-- 1 root root 1.4M Sep 20 2018 libopencv_datasets.a lrwxrwxrwx 1 root root 25 Sep 20 2018 libopencv_datasets.so -> libopencv_datasets.so.3.2 lrwxrwxrwx 1 root root 27 Sep 20 2018 libopencv_datasets.so.3.2 -> libopencv_datasets.so.3.2.0 -rw-r--r-- 1 root root 399K Sep 20 2018 libopencv_datasets.so.3.2.0 -rw-r--r-- 1 root root 206K Sep 20 2018 libopencv_dpm.a lrwxrwxrwx 1 root root 20 Sep 20 2018 libopencv_dpm.so -> libopencv_dpm.so.3.2 lrwxrwxrwx 1 root root 22 Sep 20 2018 libopencv_dpm.so.3.2 -> libopencv_dpm.so.3.2.0 -rw-r--r-- 1 root root 99K Sep 20 2018 libopencv_dpm.so.3.2.0 -rw-r--r-- 1 root root 331K Sep 20 2018 libopencv_face.a lrwxrwxrwx 1 root root 21 Sep 20 2018 libopencv_face.so -> libopencv_face.so.3.2 lrwxrwxrwx 1 root root 23 Sep 20 2018 libopencv_face.so.3.2 -> libopencv_face.so.3.2.0 -rw-r--r-- 1 root root 143K Sep 20 2018 libopencv_face.so.3.2.0 -rw-r--r-- 1 root root 1.4M Sep 20 2018 libopencv_features2d.a lrwxrwxrwx 1 root root 27 Sep 20 2018 libopencv_features2d.so -> libopencv_features2d.so.3.2 lrwxrwxrwx 1 root root 29 Sep 20 2018 libopencv_features2d.so.3.2 -> libopencv_features2d.so.3.2.0 -rw-r--r-- 1 root root 723K Sep 20 2018 libopencv_features2d.so.3.2.0 -rw-r--r-- 1 root root 1007K Sep 20 2018 libopencv_flann.a lrwxrwxrwx 1 root root 22 Sep 20 2018 libopencv_flann.so -> libopencv_flann.so.3.2 lrwxrwxrwx 1 root root 24 Sep 20 2018 libopencv_flann.so.3.2 -> libopencv_flann.so.3.2.0 -rw-r--r-- 1 root root 319K Sep 20 2018 libopencv_flann.so.3.2.0 -rw-r--r-- 1 root root 43K Sep 20 2018 libopencv_freetype.a lrwxrwxrwx 1 root root 25 Sep 20 2018 libopencv_freetype.so -> libopencv_freetype.so.3.2 lrwxrwxrwx 1 root root 27 Sep 20 2018 libopencv_freetype.so.3.2 -> libopencv_freetype.so.3.2.0 -rw-r--r-- 1 root root 27K Sep 20 2018 libopencv_freetype.so.3.2.0 -rw-r--r-- 
1 root root 73K Sep 20 2018 libopencv_fuzzy.a lrwxrwxrwx 1 root root 22 Sep 20 2018 libopencv_fuzzy.so -> libopencv_fuzzy.so.3.2 lrwxrwxrwx 1 root root 24 Sep 20 2018 libopencv_fuzzy.so.3.2 -> libopencv_fuzzy.so.3.2.0 -rw-r--r-- 1 root root 54K Sep 20 2018 libopencv_fuzzy.so.3.2.0 -rw-r--r-- 1 root root 75K Sep 20 2018 libopencv_hdf.a lrwxrwxrwx 1 root root 20 Sep 20 2018 libopencv_hdf.so -> libopencv_hdf.so.3.2 lrwxrwxrwx 1 root root 22 Sep 20 2018 libopencv_hdf.so.3.2 -> libopencv_hdf.so.3.2.0 -rw-r--r-- 1 root root 39K Sep 20 2018 libopencv_hdf.so.3.2.0 -rw-r--r-- 1 root root 94K Sep 20 2018 libopencv_highgui.a lrwxrwxrwx 1 root root 24 Sep 20 2018 libopencv_highgui.so -> libopencv_highgui.so.3.2 lrwxrwxrwx 1 root root 26 Sep 20 2018 libopencv_highgui.so.3.2 -> libopencv_highgui.so.3.2.0 -rw-r--r-- 1 root root 47K Sep 20 2018 libopencv_highgui.so.3.2.0 -rw-r--r-- 1 root root 756K Sep 20 2018 libopencv_imgcodecs.a lrwxrwxrwx 1 root root 26 Sep 20 2018 libopencv_imgcodecs.so -> libopencv_imgcodecs.so.3.2 lrwxrwxrwx 1 root root 28 Sep 20 2018 libopencv_imgcodecs.so.3.2 -> libopencv_imgcodecs.so.3.2.0 -rw-r--r-- 1 root root 227K Sep 20 2018 libopencv_imgcodecs.so.3.2.0 -rw-r--r-- 1 root root 5.5M Sep 20 2018 libopencv_imgproc.a lrwxrwxrwx 1 root root 24 Sep 20 2018 libopencv_imgproc.so -> libopencv_imgproc.so.3.2 lrwxrwxrwx 1 root root 26 Sep 20 2018 libopencv_imgproc.so.3.2 -> libopencv_imgproc.so.3.2.0 -rw-r--r-- 1 root root 2.8M Sep 20 2018 libopencv_imgproc.so.3.2.0 -rw-r--r-- 1 root root 312K Sep 20 2018 libopencv_line_descriptor.a lrwxrwxrwx 1 root root 32 Sep 20 2018 libopencv_line_descriptor.so -> libopencv_line_descriptor.so.3.2 lrwxrwxrwx 1 root root 34 Sep 20 2018 libopencv_line_descriptor.so.3.2 -> libopencv_line_descriptor.so.3.2.0 -rw-r--r-- 1 root root 159K Sep 20 2018 libopencv_line_descriptor.so.3.2.0 -rw-r--r-- 1 root root 1.5M Sep 20 2018 libopencv_ml.a lrwxrwxrwx 1 root root 19 Sep 20 2018 libopencv_ml.so -> libopencv_ml.so.3.2 lrwxrwxrwx 1 root root 21 Sep 20 2018 libopencv_ml.so.3.2 -> libopencv_ml.so.3.2.0 -rw-r--r-- 1 root root 679K Sep 20 2018 libopencv_ml.so.3.2.0 -rw-r--r-- 1 root root 689K Sep 20 2018 libopencv_objdetect.a lrwxrwxrwx 1 root root 26 Sep 20 2018 libopencv_objdetect.so -> libopencv_objdetect.so.3.2 lrwxrwxrwx 1 root root 28 Sep 20 2018 libopencv_objdetect.so.3.2 -> libopencv_objdetect.so.3.2.0 -rw-r--r-- 1 root root 343K Sep 20 2018 libopencv_objdetect.so.3.2.0 -rw-r--r-- 1 root root 648K Sep 20 2018 libopencv_optflow.a lrwxrwxrwx 1 root root 24 Sep 20 2018 libopencv_optflow.so -> libopencv_optflow.so.3.2 lrwxrwxrwx 1 root root 26 Sep 20 2018 libopencv_optflow.so.3.2 -> libopencv_optflow.so.3.2.0 -rw-r--r-- 1 root root 327K Sep 20 2018 libopencv_optflow.so.3.2.0 -rw-r--r-- 1 root root 62K Sep 20 2018 libopencv_phase_unwrapping.a lrwxrwxrwx 1 root root 33 Sep 20 2018 libopencv_phase_unwrapping.so -> libopencv_phase_unwrapping.so.3.2 lrwxrwxrwx 1 root root 35 Sep 20 2018 libopencv_phase_unwrapping.so.3.2 -> libopencv_phase_unwrapping.so.3.2.0 -rw-r--r-- 1 root root 35K Sep 20 2018 libopencv_phase_unwrapping.so.3.2.0 -rw-r--r-- 1 root root 1.5M Sep 20 2018 libopencv_photo.a lrwxrwxrwx 1 root root 22 Sep 20 2018 libopencv_photo.so -> libopencv_photo.so.3.2 lrwxrwxrwx 1 root root 24 Sep 20 2018 libopencv_photo.so.3.2 -> libopencv_photo.so.3.2.0 -rw-r--r-- 1 root root 771K Sep 20 2018 libopencv_photo.so.3.2.0 -rw-r--r-- 1 root root 66K Sep 20 2018 libopencv_plot.a lrwxrwxrwx 1 root root 21 Sep 20 2018 libopencv_plot.so -> libopencv_plot.so.3.2 lrwxrwxrwx 
1 root root 23 Sep 20 2018 libopencv_plot.so.3.2 -> libopencv_plot.so.3.2.0 -rw-r--r-- 1 root root 43K Sep 20 2018 libopencv_plot.so.3.2.0 -rw-r--r-- 1 root root 268K Sep 20 2018 libopencv_reg.a lrwxrwxrwx 1 root root 20 Sep 20 2018 libopencv_reg.so -> libopencv_reg.so.3.2 lrwxrwxrwx 1 root root 22 Sep 20 2018 libopencv_reg.so.3.2 -> libopencv_reg.so.3.2.0 -rw-r--r-- 1 root root 127K Sep 20 2018 libopencv_reg.so.3.2.0 -rw-r--r-- 1 root root 982K Sep 20 2018 libopencv_rgbd.a lrwxrwxrwx 1 root root 21 Sep 20 2018 libopencv_rgbd.so -> libopencv_rgbd.so.3.2 lrwxrwxrwx 1 root root 23 Sep 20 2018 libopencv_rgbd.so.3.2 -> libopencv_rgbd.so.3.2.0 -rw-r--r-- 1 root root 535K Sep 20 2018 libopencv_rgbd.so.3.2.0 -rw-r--r-- 1 root root 353K Sep 20 2018 libopencv_saliency.a lrwxrwxrwx 1 root root 25 Sep 20 2018 libopencv_saliency.so -> libopencv_saliency.so.3.2 lrwxrwxrwx 1 root root 27 Sep 20 2018 libopencv_saliency.so.3.2 -> libopencv_saliency.so.3.2.0 -rw-r--r-- 1 root root 179K Sep 20 2018 libopencv_saliency.so.3.2.0 -rw-r--r-- 1 root root 428K Sep 20 2018 libopencv_shape.a lrwxrwxrwx 1 root root 22 Sep 20 2018 libopencv_shape.so -> libopencv_shape.so.3.2 lrwxrwxrwx 1 root root 24 Sep 20 2018 libopencv_shape.so.3.2 -> libopencv_shape.so.3.2.0 -rw-r--r-- 1 root root 187K Sep 20 2018 libopencv_shape.so.3.2.0 -rw-r--r-- 1 root root 327K Sep 20 2018 libopencv_stereo.a lrwxrwxrwx 1 root root 23 Sep 20 2018 libopencv_stereo.so -> libopencv_stereo.so.3.2 lrwxrwxrwx 1 root root 25 Sep 20 2018 libopencv_stereo.so.3.2 -> libopencv_stereo.so.3.2.0 -rw-r--r-- 1 root root 123K Sep 20 2018 libopencv_stereo.so.3.2.0 -rw-r--r-- 1 root root 1.1M Sep 20 2018 libopencv_stitching.a lrwxrwxrwx 1 root root 26 Sep 20 2018 libopencv_stitching.so -> libopencv_stitching.so.3.2 lrwxrwxrwx 1 root root 28 Sep 20 2018 libopencv_stitching.so.3.2 -> libopencv_stitching.so.3.2.0 -rw-r--r-- 1 root root 519K Sep 20 2018 libopencv_stitching.so.3.2.0 -rw-r--r-- 1 root root 156K Sep 20 2018 libopencv_structured_light.a lrwxrwxrwx 1 root root 33 Sep 20 2018 libopencv_structured_light.so -> libopencv_structured_light.so.3.2 lrwxrwxrwx 1 root root 35 Sep 20 2018 libopencv_structured_light.so.3.2 -> libopencv_structured_light.so.3.2.0 -rw-r--r-- 1 root root 91K Sep 20 2018 libopencv_structured_light.so.3.2.0 -rw-r--r-- 1 root root 295K Sep 20 2018 libopencv_superres.a lrwxrwxrwx 1 root root 25 Sep 20 2018 libopencv_superres.so -> libopencv_superres.so.3.2 lrwxrwxrwx 1 root root 27 Sep 20 2018 libopencv_superres.so.3.2 -> libopencv_superres.so.3.2.0 -rw-r--r-- 1 root root 151K Sep 20 2018 libopencv_superres.so.3.2.0 -rw-r--r-- 1 root root 1.1M Sep 20 2018 libopencv_surface_matching.a lrwxrwxrwx 1 root root 33 Sep 20 2018 libopencv_surface_matching.so -> libopencv_surface_matching.so.3.2 lrwxrwxrwx 1 root root 35 Sep 20 2018 libopencv_surface_matching.so.3.2 -> libopencv_surface_matching.so.3.2.0 -rw-r--r-- 1 root root 259K Sep 20 2018 libopencv_surface_matching.so.3.2.0 -rw-r--r-- 1 root root 761K Sep 20 2018 libopencv_text.a lrwxrwxrwx 1 root root 21 Sep 20 2018 libopencv_text.so -> libopencv_text.so.3.2 lrwxrwxrwx 1 root root 23 Sep 20 2018 libopencv_text.so.3.2 -> libopencv_text.so.3.2.0 -rw-r--r-- 1 root root 367K Sep 20 2018 libopencv_text.so.3.2.0 -rw-r--r-- 1 root root 1.6M Sep 20 2018 libopencv_ts.a -rw-r--r-- 1 root root 700K Sep 20 2018 libopencv_video.a lrwxrwxrwx 1 root root 22 Sep 20 2018 libopencv_video.so -> libopencv_video.so.3.2 lrwxrwxrwx 1 root root 24 Sep 20 2018 libopencv_video.so.3.2 -> libopencv_video.so.3.2.0 
-rw-r--r-- 1 root root 395K Sep 20 2018 libopencv_video.so.3.2.0 -rw-r--r-- 1 root root 488K Sep 20 2018 libopencv_videoio.a lrwxrwxrwx 1 root root 24 Sep 20 2018 libopencv_videoio.so -> libopencv_videoio.so.3.2 lrwxrwxrwx 1 root root 26 Sep 20 2018 libopencv_videoio.so.3.2 -> libopencv_videoio.so.3.2.0 -rw-r--r-- 1 root root 211K Sep 20 2018 libopencv_videoio.so.3.2.0 -rw-r--r-- 1 root root 688K Sep 20 2018 libopencv_videostab.a lrwxrwxrwx 1 root root 26 Sep 20 2018 libopencv_videostab.so -> libopencv_videostab.so.3.2 lrwxrwxrwx 1 root root 28 Sep 20 2018 libopencv_videostab.so.3.2 -> libopencv_videostab.so.3.2.0 -rw-r--r-- 1 root root 347K Sep 20 2018 libopencv_videostab.so.3.2.0 -rw-r--r-- 1 root root 1019K Sep 20 2018 libopencv_viz.a lrwxrwxrwx 1 root root 20 Sep 20 2018 libopencv_viz.so -> libopencv_viz.so.3.2 lrwxrwxrwx 1 root root 22 Sep 20 2018 libopencv_viz.so.3.2 -> libopencv_viz.so.3.2.0 -rw-r--r-- 1 root root 363K Sep 20 2018 libopencv_viz.so.3.2.0 -rw-r--r-- 1 root root 2.3M Sep 20 2018 libopencv_ximgproc.a lrwxrwxrwx 1 root root 25 Sep 20 2018 libopencv_ximgproc.so -> libopencv_ximgproc.so.3.2 lrwxrwxrwx 1 root root 27 Sep 20 2018 libopencv_ximgproc.so.3.2 -> libopencv_ximgproc.so.3.2.0 -rw-r--r-- 1 root root 983K Sep 20 2018 libopencv_ximgproc.so.3.2.0 -rw-r--r-- 1 root root 186K Sep 20 2018 libopencv_xobjdetect.a lrwxrwxrwx 1 root root 27 Sep 20 2018 libopencv_xobjdetect.so -> libopencv_xobjdetect.so.3.2 lrwxrwxrwx 1 root root 29 Sep 20 2018 libopencv_xobjdetect.so.3.2 -> libopencv_xobjdetect.so.3.2.0 -rw-r--r-- 1 root root 87K Sep 20 2018 libopencv_xobjdetect.so.3.2.0 -rw-r--r-- 1 root root 441K Sep 20 2018 libopencv_xphoto.a lrwxrwxrwx 1 root root 23 Sep 20 2018 libopencv_xphoto.so -> libopencv_xphoto.so.3.2 lrwxrwxrwx 1 root root 25 Sep 20 2018 libopencv_xphoto.so.3.2 -> libopencv_xphoto.so.3.2.0 -rw-r--r-- 1 root root 220K Sep 20 2018 libopencv_xphoto.so.3.2.0 -rw-r--r-- 1 root root 343K Aug 20 2019 libopenjp2.so.2.3.0 lrwxrwxrwx 1 root root 19 Aug 20 2019 libopenjp2.so.7 -> libopenjp2.so.2.3.0 lrwxrwxrwx 1 root root 19 Feb 4 2018 libopenmpt.so.0 -> libopenmpt.so.0.1.1 -rw-r--r-- 1 root root 1.5M Feb 4 2018 libopenmpt.so.0.1.1 lrwxrwxrwx 1 root root 16 Feb 17 2016 libopus.so.0 -> libopus.so.0.5.2 -rw-r--r-- 1 root root 295K Feb 17 2016 libopus.so.0.5.2 lrwxrwxrwx 1 root root 32 Feb 5 2018 liboshmem.so.20 -> openmpi/lib/liboshmem.so.20.10.1 lrwxrwxrwx 1 root root 19 Jan 4 2021 libp11-kit.so.0 -> libp11-kit.so.0.3.0 -rw-r--r-- 1 root root 1.2M Jan 4 2021 libp11-kit.so.0.3.0 lrwxrwxrwx 1 root root 15 May 23 2018 libpanel.so.5 -> libpanel.so.5.9 -rw-r--r-- 1 root root 14K May 23 2018 libpanel.so.5.9 lrwxrwxrwx 1 root root 16 May 23 2018 libpanelw.so.5 -> libpanelw.so.5.9 -rw-r--r-- 1 root root 14K May 23 2018 libpanelw.so.5.9 lrwxrwxrwx 1 root root 25 Aug 21 2018 libpango-1.0.so.0 -> libpango-1.0.so.0.4000.14 -rw-r--r-- 1 root root 306K Aug 21 2018 libpango-1.0.so.0.4000.14 lrwxrwxrwx 1 root root 30 Aug 21 2018 libpangocairo-1.0.so.0 -> libpangocairo-1.0.so.0.4000.14 -rw-r--r-- 1 root root 50K Aug 21 2018 libpangocairo-1.0.so.0.4000.14 lrwxrwxrwx 1 root root 28 Aug 21 2018 libpangoft2-1.0.so.0 -> libpangoft2-1.0.so.0.4000.14 -rw-r--r-- 1 root root 86K Aug 21 2018 libpangoft2-1.0.so.0.4000.14 lrwxrwxrwx 1 root root 20 Mar 24 2018 libpathplan.so -> libpathplan.so.4.0.0 lrwxrwxrwx 1 root root 20 Mar 24 2018 libpathplan.so.4 -> libpathplan.so.4.0.0 -rw-r--r-- 1 root root 31K Mar 24 2018 libpathplan.so.4.0.0 lrwxrwxrwx 1 root root 22 Mar 18 2018 libpciaccess.so.0 -> 
libpciaccess.so.0.11.1 -rw-r--r-- 1 root root 35K Mar 18 2018 libpciaccess.so.0.11.1 -rw-r--r-- 1 root root 615K May 17 07:42 libpcre.a lrwxrwxrwx 1 root root 34 May 17 07:42 libpcre.so -> /lib/x86_64-linux-gnu/libpcre.so.3 -rw-r--r-- 1 root root 576K May 17 07:42 libpcre16.a lrwxrwxrwx 1 root root 19 May 17 07:42 libpcre16.so -> libpcre16.so.3.13.3 lrwxrwxrwx 1 root root 19 May 17 07:42 libpcre16.so.3 -> libpcre16.so.3.13.3 -rw-r--r-- 1 root root 410K May 17 07:42 libpcre16.so.3.13.3 -rw-r--r-- 1 root root 558K May 17 07:42 libpcre32.a lrwxrwxrwx 1 root root 19 May 17 07:42 libpcre32.so -> libpcre32.so.3.13.3 lrwxrwxrwx 1 root root 19 May 17 07:42 libpcre32.so.3 -> libpcre32.so.3.13.3 -rw-r--r-- 1 root root 390K May 17 07:42 libpcre32.so.3.13.3 -rw-r--r-- 1 root root 51K May 17 07:42 libpcrecpp.a lrwxrwxrwx 1 root root 19 May 17 07:42 libpcrecpp.so -> libpcrecpp.so.0.0.1 lrwxrwxrwx 1 root root 19 May 17 07:42 libpcrecpp.so.0 -> libpcrecpp.so.0.0.1 -rw-r--r-- 1 root root 34K May 17 07:42 libpcrecpp.so.0.0.1 -rw-r--r-- 1 root root 6.6K May 17 07:42 libpcreposix.a lrwxrwxrwx 1 root root 22 May 17 07:42 libpcreposix.so -> libpcreposix.so.3.13.3 lrwxrwxrwx 1 root root 22 May 17 07:42 libpcreposix.so.3 -> libpcreposix.so.3.13.3 -rw-r--r-- 1 root root 9.9K May 17 07:42 libpcreposix.so.3.13.3 lrwxrwxrwx 1 root root 17 Oct 19 2020 libperl.so.5.26 -> libperl.so.5.26.1 -rw-r--r-- 1 root root 2.0M Oct 19 2020 libperl.so.5.26.1 lrwxrwxrwx 1 root root 21 Dec 17 2017 libpixman-1.so.0 -> libpixman-1.so.0.34.0 -rw-r--r-- 1 root root 659K Dec 17 2017 libpixman-1.so.0.34.0 -rw-r--r-- 1 root root 19K Feb 22 2018 libplc4.so -rw-r--r-- 1 root root 15K Feb 22 2018 libplds4.so lrwxrwxrwx 1 root root 10 Apr 30 2019 libpng.a -> libpng16.a lrwxrwxrwx 1 root root 11 Apr 30 2019 libpng.so -> libpng16.so -rw-r--r-- 1 root root 328K Apr 30 2019 libpng16.a lrwxrwxrwx 1 root root 19 Apr 30 2019 libpng16.so -> libpng16.so.16.34.0 lrwxrwxrwx 1 root root 19 Apr 30 2019 libpng16.so.16 -> libpng16.so.16.34.0 -rw-r--r-- 1 root root 198K Apr 30 2019 libpng16.so.16.34.0 lrwxrwxrwx 1 root root 20 Nov 26 2020 libpoppler.so.73 -> libpoppler.so.73.0.0 -rw-r--r-- 1 root root 2.6M Nov 26 2020 libpoppler.so.73.0.0 lrwxrwxrwx 1 root root 13 May 18 01:58 libpq.so.5 -> libpq.so.5.10 -rw-r--r-- 1 root root 290K May 18 01:58 libpq.so.5.10 lrwxrwxrwx 1 root root 17 Jun 18 2017 libproj.so.12 -> libproj.so.12.0.0 -rw-r--r-- 1 root root 420K Jun 18 2017 libproj.so.12.0.0 lrwxrwxrwx 1 root root 17 Dec 8 2020 libproxy.so.1 -> libproxy.so.1.0.0 -rw-r--r-- 1 root root 123K Dec 8 2020 libproxy.so.1.0.0 lrwxrwxrwx 1 root root 15 Mar 4 2018 libpsl.so.5 -> libpsl.so.5.2.0 -rw-r--r-- 1 root root 54K Mar 4 2018 libpsl.so.5.2.0 lrwxrwxrwx 1 root root 40 Aug 16 08:41 libpsm_infinipath.so.1 -> /etc/alternatives/libpsm_infinipath.so.1 -rw-r--r-- 1 root root 6.0M May 3 10:19 libpthread.a -rw-r--r-- 1 root root 252 May 3 10:19 libpthread.so -rw-r--r-- 1 root root 29K May 3 10:19 libpthread_nonshared.a lrwxrwxrwx 1 root root 19 Jul 1 15:56 libpython2.7.so.1 -> libpython2.7.so.1.0 -rw-r--r-- 1 root root 3.4M Jul 1 15:56 libpython2.7.so.1.0 lrwxrwxrwx 1 root root 17 Dec 9 2017 libqhull.so.7 -> libqhull.so.7.2.0 -rw-r--r-- 1 root root 358K Dec 9 2017 libqhull.so.7.2.0 lrwxrwxrwx 1 root root 20 Mar 10 2020 libquadmath.so.0 -> libquadmath.so.0.0.0 -rw-r--r-- 1 root root 256K Mar 10 2020 libquadmath.so.0.0.0 -rw-r--r-- 1 root root 91K Apr 26 2016 libraw1394.a -rw-r--r-- 1 root root 969 Apr 26 2016 libraw1394.la lrwxrwxrwx 1 root root 20 Apr 26 2016 libraw1394.so -> 
libraw1394.so.11.1.0 lrwxrwxrwx 1 root root 20 Apr 26 2016 libraw1394.so.11 -> libraw1394.so.11.1.0 -rw-r--r-- 1 root root 59K Apr 26 2016 libraw1394.so.11.1.0 lrwxrwxrwx 1 root root 21 Aug 5 2019 librdmacm.so.1 -> librdmacm.so.1.1.17.1 -rw-r--r-- 1 root root 87K Aug 5 2019 librdmacm.so.1.1.17.1 -rw-r--r-- 1 root root 136K May 3 10:19 libresolv.a lrwxrwxrwx 1 root root 36 May 3 10:19 libresolv.so -> /lib/x86_64-linux-gnu/libresolv.so.2 lrwxrwxrwx 1 root root 20 Oct 24 2016 librest-0.7.so.0 -> librest-0.7.so.0.0.0 -rw-r--r-- 1 root root 88K Oct 24 2016 librest-0.7.so.0.0.0 lrwxrwxrwx 1 root root 18 Dec 15 2017 libroken.so.18 -> libroken.so.18.1.0 -rw-r--r-- 1 root root 87K Dec 15 2017 libroken.so.18.1.0 -rw-r--r-- 1 root root 52K May 3 10:19 librpcsvc.a lrwxrwxrwx 1 root root 20 Jul 28 2020 librsvg-2.so.2 -> librsvg-2.so.2.40.20 -rw-r--r-- 1 root root 223K Jul 28 2020 librsvg-2.so.2.40.20 -rw-r--r-- 1 root root 76K May 3 10:19 librt.a lrwxrwxrwx 1 root root 32 May 3 10:19 librt.so -> /lib/x86_64-linux-gnu/librt.so.1 -rw-r--r-- 1 root root 111K Apr 27 2016 librtmp.so.1 lrwxrwxrwx 1 root root 18 Feb 15 2022 libsasl2.so.2 -> libsasl2.so.2.0.25 -rw-r--r-- 1 root root 107K Feb 15 2022 libsasl2.so.2.0.25 -rw-r--r-- 1 root root 251K Mar 1 2018 libsemanage.so.1 lrwxrwxrwx 1 root root 19 Mar 31 20:53 libsensors.so.4 -> libsensors.so.4.4.0 -rw-r--r-- 1 root root 59K Mar 31 20:53 libsensors.so.4.4.0 lrwxrwxrwx 1 root root 17 Aug 6 2017 libshine.so.3 -> libshine.so.3.0.1 -rw-r--r-- 1 root root 43K Aug 6 2017 libshine.so.3.0.1 -rw-r--r-- 1 root root 181K Jul 6 11:25 libsmime3.so lrwxrwxrwx 1 root root 18 Sep 5 2017 libsnappy.so.1 -> libsnappy.so.1.1.7 -rw-r--r-- 1 root root 31K Sep 5 2017 libsnappy.so.1.1.7 lrwxrwxrwx 1 root root 20 Oct 5 2017 libsocket++.so.1 -> libsocket++.so.1.0.2 -rw-r--r-- 1 root root 132K Oct 5 2017 libsocket++.so.1.0.2 lrwxrwxrwx 1 root root 20 Oct 9 2019 libsoup-2.4.so.1 -> libsoup-2.4.so.1.8.0 -rw-r--r-- 1 root root 971K Oct 9 2019 libsoup-2.4.so.1.8.0 lrwxrwxrwx 1 root root 26 Oct 9 2019 libsoup-gnome-2.4.so.1 -> libsoup-gnome-2.4.so.1.8.0 -rw-r--r-- 1 root root 9.9K Oct 9 2019 libsoup-gnome-2.4.so.1.8.0 lrwxrwxrwx 1 root root 16 Nov 4 2017 libsoxr.so.0 -> libsoxr.so.0.1.1 -rw-r--r-- 1 root root 183K Nov 4 2017 libsoxr.so.0.1.1 lrwxrwxrwx 1 root root 22 Nov 17 2016 libspatialite.so.7 -> libspatialite.so.7.1.0 -rw-r--r-- 1 root root 5.6M Nov 17 2016 libspatialite.so.7.1.0 lrwxrwxrwx 1 root root 17 Feb 8 2022 libspeex.so.1 -> libspeex.so.1.5.0 -rw-r--r-- 1 root root 102K Feb 8 2022 libspeex.so.1.5.0 lrwxrwxrwx 1 root root 19 Apr 28 14:04 libsqlite3.so.0 -> libsqlite3.so.0.8.6 -rw-r--r-- 1 root root 1.1M Apr 28 14:04 libsqlite3.so.0.8.6 lrwxrwxrwx 1 root root 22 Jul 31 2020 libssh-gcrypt.so.4 -> libssh-gcrypt.so.4.5.0 -rw-r--r-- 1 root root 465K Jul 31 2020 libssh-gcrypt.so.4.5.0 lrwxrwxrwx 1 root root 30 Jul 31 2020 libssh-gcrypt_threads.so.4 -> libssh-gcrypt_threads.so.4.5.0 -rw-r--r-- 1 root root 6.0K Jul 31 2020 libssh-gcrypt_threads.so.4.5.0 -rw-r--r-- 1 root root 564K Jul 4 11:25 libssl.so.1.1 -rw-r--r-- 1 root root 321K Jul 6 11:25 libssl3.so lrwxrwxrwx 1 root root 19 Mar 10 2020 libstdc++.so.6 -> libstdc++.so.6.0.25 -rw-r--r-- 1 root root 1.6M Mar 10 2020 libstdc++.so.6.0.25 lrwxrwxrwx 1 root root 19 Nov 6 2017 libsuperlu.so.5 -> libsuperlu.so.5.2.1 -rw-r--r-- 1 root root 447K Nov 6 2017 libsuperlu.so.5.2.1 -rw-r--r-- 1 root root 220K May 18 20:01 libswresample.a lrwxrwxrwx 1 root root 24 May 18 20:01 libswresample.so -> libswresample.so.2.9.100 lrwxrwxrwx 1 root root 24 
May 18 20:01 libswresample.so.2 -> libswresample.so.2.9.100 -rw-r--r-- 1 root root 122K May 18 20:01 libswresample.so.2.9.100 -rw-r--r-- 1 root root 780K May 18 20:01 libswscale.a lrwxrwxrwx 1 root root 21 May 18 20:01 libswscale.so -> libswscale.so.4.8.100 lrwxrwxrwx 1 root root 21 May 18 20:01 libswscale.so.4 -> libswscale.so.4.8.100 -rw-r--r-- 1 root root 538K May 18 20:01 libswscale.so.4.8.100 lrwxrwxrwx 1 root root 14 Jul 28 2017 libsz.so.2 -> libsz.so.2.0.1 -rw-r--r-- 1 root root 9.9K Jul 28 2017 libsz.so.2.0.1 lrwxrwxrwx 1 root root 17 Jan 21 2018 libtasn1.so.6 -> libtasn1.so.6.5.5 -rw-r--r-- 1 root root 74K Jan 21 2018 libtasn1.so.6.5.5 lrwxrwxrwx 1 root root 11 Oct 4 2017 libtbb.so -> libtbb.so.2 -rw-r--r-- 1 root root 230K Oct 4 2017 libtbb.so.2 lrwxrwxrwx 1 root root 17 Oct 4 2017 libtbbmalloc.so -> libtbbmalloc.so.2 -rw-r--r-- 1 root root 110K Oct 4 2017 libtbbmalloc.so.2 lrwxrwxrwx 1 root root 23 Oct 4 2017 libtbbmalloc_proxy.so -> libtbbmalloc_proxy.so.2 -rw-r--r-- 1 root root 11K Oct 4 2017 libtbbmalloc_proxy.so.2 -rw-r--r-- 1 root root 1.7M Feb 22 2018 libtcl8.6.so lrwxrwxrwx 1 root root 12 Feb 22 2018 libtcl8.6.so.0 -> libtcl8.6.so lrwxrwxrwx 1 root root 21 Apr 7 2018 libtesseract.so.4 -> libtesseract.so.4.0.0 -rw-r--r-- 1 root root 3.1M Apr 7 2018 libtesseract.so.4.0.0 lrwxrwxrwx 1 root root 16 Feb 16 2018 libthai.so.0 -> libthai.so.0.3.0 -rw-r--r-- 1 root root 37K Feb 16 2018 libthai.so.0.3.0 lrwxrwxrwx 1 root root 19 May 27 2016 libtheora.so.0 -> libtheora.so.0.3.10 -rw-r--r-- 1 root root 327K May 27 2016 libtheora.so.0.3.10 lrwxrwxrwx 1 root root 21 May 27 2016 libtheoradec.so.1 -> libtheoradec.so.1.1.4 -rw-r--r-- 1 root root 119K May 27 2016 libtheoradec.so.1.1.4 lrwxrwxrwx 1 root root 21 May 27 2016 libtheoraenc.so.1 -> libtheoraenc.so.1.1.2 -rw-r--r-- 1 root root 251K May 27 2016 libtheoraenc.so.1.1.2 lrwxrwxrwx 1 root root 39 May 3 10:19 libthread_db.so -> /lib/x86_64-linux-gnu/libthread_db.so.1 lrwxrwxrwx 1 root root 13 May 23 2018 libtic.so.5 -> libtic.so.5.9 -rw-r--r-- 1 root root 62K May 23 2018 libtic.so.5.9 -rw-r--r-- 1 root root 752K May 11 15:09 libtiff.a lrwxrwxrwx 1 root root 16 May 11 15:09 libtiff.so -> libtiff.so.5.3.0 lrwxrwxrwx 1 root root 16 May 11 15:09 libtiff.so.5 -> libtiff.so.5.3.0 -rw-r--r-- 1 root root 479K May 11 15:09 libtiff.so.5.3.0 -rw-r--r-- 1 root root 8.5K May 11 15:09 libtiffxx.a lrwxrwxrwx 1 root root 18 May 11 15:09 libtiffxx.so -> libtiffxx.so.5.3.0 lrwxrwxrwx 1 root root 18 May 11 15:09 libtiffxx.so.5 -> libtiffxx.so.5.3.0 -rw-r--r-- 1 root root 10K May 11 15:09 libtiffxx.so.5.3.0 -rw-r--r-- 1 root root 1.4M Mar 24 2018 libtk8.6.so lrwxrwxrwx 1 root root 11 Mar 24 2018 libtk8.6.so.0 -> libtk8.6.so lrwxrwxrwx 1 root root 16 Mar 10 2020 libtsan.so.0 -> libtsan.so.0.0.0 -rw-r--r-- 1 root root 942K Mar 10 2020 libtsan.so.0.0.0 -rw-r--r-- 1 root root 3.6K Mar 10 2020 libtsan_preinit.o lrwxrwxrwx 1 root root 19 Aug 29 2017 libtwolame.so.0 -> libtwolame.so.0.0.0 -rw-r--r-- 1 root root 122K Aug 29 2017 libtwolame.so.0.0.0 lrwxrwxrwx 1 root root 17 Dec 4 2019 libubsan.so.0 -> libubsan.so.0.0.0 -rw-r--r-- 1 root root 322K Dec 4 2019 libubsan.so.0.0.0 lrwxrwxrwx 1 root root 21 Mar 21 2019 libunistring.so.2 -> libunistring.so.2.1.0 -rw-r--r-- 1 root root 1.5M Mar 21 2019 libunistring.so.2.1.0 lrwxrwxrwx 1 root root 22 Jul 12 13:52 liburiparser.so.1 -> liburiparser.so.1.0.20 -rw-r--r-- 1 root root 110K Jul 12 13:52 liburiparser.so.1.0.20 -rw-r--r-- 1 root root 15K May 3 10:19 libutil.a lrwxrwxrwx 1 root root 34 May 3 10:19 libutil.so -> 
/lib/x86_64-linux-gnu/libutil.so.1 lrwxrwxrwx 1 root root 20 Apr 3 2018 libva-drm.so.2 -> libva-drm.so.2.100.0 -rw-r--r-- 1 root root 11K Apr 3 2018 libva-drm.so.2.100.0 lrwxrwxrwx 1 root root 20 Apr 3 2018 libva-x11.so.2 -> libva-x11.so.2.100.0 -rw-r--r-- 1 root root 23K Apr 3 2018 libva-x11.so.2.100.0 lrwxrwxrwx 1 root root 16 Apr 3 2018 libva.so.2 -> libva.so.2.100.0 -rw-r--r-- 1 root root 131K Apr 3 2018 libva.so.2.100.0 lrwxrwxrwx 1 root root 17 Feb 19 2016 libvdpau.so.1 -> libvdpau.so.1.0.0 -rw-r--r-- 1 root root 15K Feb 19 2016 libvdpau.so.1.0.0 lrwxrwxrwx 1 root root 18 Mar 16 2018 libvorbis.so.0 -> libvorbis.so.0.4.8 -rw-r--r-- 1 root root 171K Mar 16 2018 libvorbis.so.0.4.8 lrwxrwxrwx 1 root root 22 Mar 16 2018 libvorbisenc.so.2 -> libvorbisenc.so.2.0.11 -rw-r--r-- 1 root root 675K Mar 16 2018 libvorbisenc.so.2.0.11 lrwxrwxrwx 1 root root 22 Mar 16 2018 libvorbisfile.so.3 -> libvorbisfile.so.3.3.7 -rw-r--r-- 1 root root 31K Mar 16 2018 libvorbisfile.so.3.3.7 lrwxrwxrwx 1 root root 15 Nov 19 2019 libvpx.so.5 -> libvpx.so.5.0.0 lrwxrwxrwx 1 root root 15 Nov 19 2019 libvpx.so.5.0 -> libvpx.so.5.0.0 -rw-r--r-- 1 root root 2.3M Nov 19 2019 libvpx.so.5.0.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkChartsCore-6.3.so.6.3 -> libvtkChartsCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.4M Dec 17 2017 libvtkChartsCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkChartsCorePython27D-6.3.so.6.3 -> libvtkChartsCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 939K Dec 17 2017 libvtkChartsCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkChartsCoreTCL-6.3.so.6.3 -> libvtkChartsCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 573K Dec 17 2017 libvtkChartsCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkCommonColor-6.3.so.6.3 -> libvtkCommonColor-6.3.so.6.3.0 -rw-r--r-- 1 root root 106K Dec 17 2017 libvtkCommonColor-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkCommonColorPython27D-6.3.so.6.3 -> libvtkCommonColorPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 70K Dec 17 2017 libvtkCommonColorPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkCommonColorTCL-6.3.so.6.3 -> libvtkCommonColorTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 27K Dec 17 2017 libvtkCommonColorTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkCommonComputationalGeometry-6.3.so.6.3 -> libvtkCommonComputationalGeometry-6.3.so.6.3.0 -rw-r--r-- 1 root root 240K Dec 17 2017 libvtkCommonComputationalGeometry-6.3.so.6.3.0 lrwxrwxrwx 1 root root 55 Dec 17 2017 libvtkCommonComputationalGeometryPython27D-6.3.so.6.3 -> libvtkCommonComputationalGeometryPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 423K Dec 17 2017 libvtkCommonComputationalGeometryPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 49 Dec 17 2017 libvtkCommonComputationalGeometryTCL-6.3.so.6.3 -> libvtkCommonComputationalGeometryTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 259K Dec 17 2017 libvtkCommonComputationalGeometryTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkCommonCore-6.3.so.6.3 -> libvtkCommonCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 3.2M Dec 17 2017 libvtkCommonCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkCommonCorePython27D-6.3.so.6.3 -> libvtkCommonCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 3.0M Dec 17 2017 libvtkCommonCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkCommonCoreTCL-6.3.so.6.3 -> libvtkCommonCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 900K Dec 17 2017 libvtkCommonCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 
libvtkCommonDataModel-6.3.so.6.3 -> libvtkCommonDataModel-6.3.so.6.3.0 -rw-r--r-- 1 root root 3.4M Dec 17 2017 libvtkCommonDataModel-6.3.so.6.3.0 lrwxrwxrwx 1 root root 43 Dec 17 2017 libvtkCommonDataModelPython27D-6.3.so.6.3 -> libvtkCommonDataModelPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 4.2M Dec 17 2017 libvtkCommonDataModelPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkCommonDataModelTCL-6.3.so.6.3 -> libvtkCommonDataModelTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 2.2M Dec 17 2017 libvtkCommonDataModelTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkCommonExecutionModel-6.3.so.6.3 -> libvtkCommonExecutionModel-6.3.so.6.3.0 -rw-r--r-- 1 root root 669K Dec 17 2017 libvtkCommonExecutionModel-6.3.so.6.3.0 lrwxrwxrwx 1 root root 48 Dec 17 2017 libvtkCommonExecutionModelPython27D-6.3.so.6.3 -> libvtkCommonExecutionModelPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 621K Dec 17 2017 libvtkCommonExecutionModelPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkCommonExecutionModelTCL-6.3.so.6.3 -> libvtkCommonExecutionModelTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 474K Dec 17 2017 libvtkCommonExecutionModelTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkCommonMath-6.3.so.6.3 -> libvtkCommonMath-6.3.so.6.3.0 -rw-r--r-- 1 root root 128K Dec 17 2017 libvtkCommonMath-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkCommonMathPython27D-6.3.so.6.3 -> libvtkCommonMathPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 347K Dec 17 2017 libvtkCommonMathPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkCommonMathTCL-6.3.so.6.3 -> libvtkCommonMathTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 103K Dec 17 2017 libvtkCommonMathTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkCommonMisc-6.3.so.6.3 -> libvtkCommonMisc-6.3.so.6.3.0 -rw-r--r-- 1 root root 88K Dec 17 2017 libvtkCommonMisc-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkCommonMiscPython27D-6.3.so.6.3 -> libvtkCommonMiscPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 88K Dec 17 2017 libvtkCommonMiscPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkCommonMiscTCL-6.3.so.6.3 -> libvtkCommonMiscTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 47K Dec 17 2017 libvtkCommonMiscTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkCommonSystem-6.3.so.6.3 -> libvtkCommonSystem-6.3.so.6.3.0 -rw-r--r-- 1 root root 76K Dec 17 2017 libvtkCommonSystem-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkCommonSystemPython27D-6.3.so.6.3 -> libvtkCommonSystemPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 71K Dec 17 2017 libvtkCommonSystemPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkCommonSystemTCL-6.3.so.6.3 -> libvtkCommonSystemTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 67K Dec 17 2017 libvtkCommonSystemTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkCommonTransforms-6.3.so.6.3 -> libvtkCommonTransforms-6.3.so.6.3.0 -rw-r--r-- 1 root root 184K Dec 17 2017 libvtkCommonTransforms-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkCommonTransformsPython27D-6.3.so.6.3 -> libvtkCommonTransformsPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 263K Dec 17 2017 libvtkCommonTransformsPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkCommonTransformsTCL-6.3.so.6.3 -> libvtkCommonTransformsTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 200K Dec 17 2017 libvtkCommonTransformsTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkDICOMParser-6.3.so.6.3 -> libvtkDICOMParser-6.3.so.6.3.0 -rw-r--r-- 1 root root 96K Dec 
17 2017 libvtkDICOMParser-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkDomainsChemistry-6.3.so.6.3 -> libvtkDomainsChemistry-6.3.so.6.3.0 -rw-r--r-- 1 root root 293K Dec 17 2017 libvtkDomainsChemistry-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkDomainsChemistryPython27D-6.3.so.6.3 -> libvtkDomainsChemistryPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 181K Dec 17 2017 libvtkDomainsChemistryPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkDomainsChemistryTCL-6.3.so.6.3 -> libvtkDomainsChemistryTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 127K Dec 17 2017 libvtkDomainsChemistryTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkFiltersAMR-6.3.so.6.3 -> libvtkFiltersAMR-6.3.so.6.3.0 -rw-r--r-- 1 root root 273K Dec 17 2017 libvtkFiltersAMR-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkFiltersAMRPython27D-6.3.so.6.3 -> libvtkFiltersAMRPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 183K Dec 17 2017 libvtkFiltersAMRPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkFiltersAMRTCL-6.3.so.6.3 -> libvtkFiltersAMRTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 139K Dec 17 2017 libvtkFiltersAMRTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkFiltersCore-6.3.so.6.3 -> libvtkFiltersCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 4.9M Dec 17 2017 libvtkFiltersCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkFiltersCorePython27D-6.3.so.6.3 -> libvtkFiltersCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 2.6M Dec 17 2017 libvtkFiltersCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkFiltersCoreTCL-6.3.so.6.3 -> libvtkFiltersCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.5M Dec 17 2017 libvtkFiltersCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkFiltersExtraction-6.3.so.6.3 -> libvtkFiltersExtraction-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.1M Dec 17 2017 libvtkFiltersExtraction-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkFiltersExtractionPython27D-6.3.so.6.3 -> libvtkFiltersExtractionPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 497K Dec 17 2017 libvtkFiltersExtractionPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkFiltersExtractionTCL-6.3.so.6.3 -> libvtkFiltersExtractionTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 360K Dec 17 2017 libvtkFiltersExtractionTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkFiltersFlowPaths-6.3.so.6.3 -> libvtkFiltersFlowPaths-6.3.so.6.3.0 -rw-r--r-- 1 root root 473K Dec 17 2017 libvtkFiltersFlowPaths-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkFiltersFlowPathsPython27D-6.3.so.6.3 -> libvtkFiltersFlowPathsPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 495K Dec 17 2017 libvtkFiltersFlowPathsPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkFiltersFlowPathsTCL-6.3.so.6.3 -> libvtkFiltersFlowPathsTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 264K Dec 17 2017 libvtkFiltersFlowPathsTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkFiltersGeneral-6.3.so.6.3 -> libvtkFiltersGeneral-6.3.so.6.3.0 -rw-r--r-- 1 root root 3.2M Dec 17 2017 libvtkFiltersGeneral-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkFiltersGeneralPython27D-6.3.so.6.3 -> libvtkFiltersGeneralPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 2.2M Dec 17 2017 libvtkFiltersGeneralPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkFiltersGeneralTCL-6.3.so.6.3 -> libvtkFiltersGeneralTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.5M Dec 17 2017 libvtkFiltersGeneralTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 
libvtkFiltersGeneric-6.3.so.6.3 -> libvtkFiltersGeneric-6.3.so.6.3.0 -rw-r--r-- 1 root root 296K Dec 17 2017 libvtkFiltersGeneric-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkFiltersGenericPython27D-6.3.so.6.3 -> libvtkFiltersGenericPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 391K Dec 17 2017 libvtkFiltersGenericPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkFiltersGenericTCL-6.3.so.6.3 -> libvtkFiltersGenericTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 220K Dec 17 2017 libvtkFiltersGenericTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkFiltersGeometry-6.3.so.6.3 -> libvtkFiltersGeometry-6.3.so.6.3.0 -rw-r--r-- 1 root root 546K Dec 17 2017 libvtkFiltersGeometry-6.3.so.6.3.0 lrwxrwxrwx 1 root root 43 Dec 17 2017 libvtkFiltersGeometryPython27D-6.3.so.6.3 -> libvtkFiltersGeometryPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 423K Dec 17 2017 libvtkFiltersGeometryPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkFiltersGeometryTCL-6.3.so.6.3 -> libvtkFiltersGeometryTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 280K Dec 17 2017 libvtkFiltersGeometryTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkFiltersHybrid-6.3.so.6.3 -> libvtkFiltersHybrid-6.3.so.6.3.0 -rw-r--r-- 1 root root 867K Dec 17 2017 libvtkFiltersHybrid-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkFiltersHybridPython27D-6.3.so.6.3 -> libvtkFiltersHybridPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 701K Dec 17 2017 libvtkFiltersHybridPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkFiltersHybridTCL-6.3.so.6.3 -> libvtkFiltersHybridTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 432K Dec 17 2017 libvtkFiltersHybridTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkFiltersHyperTree-6.3.so.6.3 -> libvtkFiltersHyperTree-6.3.so.6.3.0 -rw-r--r-- 1 root root 297K Dec 17 2017 libvtkFiltersHyperTree-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkFiltersHyperTreePython27D-6.3.so.6.3 -> libvtkFiltersHyperTreePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 218K Dec 17 2017 libvtkFiltersHyperTreePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkFiltersHyperTreeTCL-6.3.so.6.3 -> libvtkFiltersHyperTreeTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 163K Dec 17 2017 libvtkFiltersHyperTreeTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkFiltersImaging-6.3.so.6.3 -> libvtkFiltersImaging-6.3.so.6.3.0 -rw-r--r-- 1 root root 136K Dec 17 2017 libvtkFiltersImaging-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkFiltersImagingPython27D-6.3.so.6.3 -> libvtkFiltersImagingPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 111K Dec 17 2017 libvtkFiltersImagingPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkFiltersImagingTCL-6.3.so.6.3 -> libvtkFiltersImagingTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 71K Dec 17 2017 libvtkFiltersImagingTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkFiltersModeling-6.3.so.6.3 -> libvtkFiltersModeling-6.3.so.6.3.0 -rw-r--r-- 1 root root 481K Dec 17 2017 libvtkFiltersModeling-6.3.so.6.3.0 lrwxrwxrwx 1 root root 43 Dec 17 2017 libvtkFiltersModelingPython27D-6.3.so.6.3 -> libvtkFiltersModelingPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 607K Dec 17 2017 libvtkFiltersModelingPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkFiltersModelingTCL-6.3.so.6.3 -> libvtkFiltersModelingTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 388K Dec 17 2017 libvtkFiltersModelingTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkFiltersParallel-6.3.so.6.3 -> 
libvtkFiltersParallel-6.3.so.6.3.0 -rw-r--r-- 1 root root 860K Dec 17 2017 libvtkFiltersParallel-6.3.so.6.3.0 lrwxrwxrwx 1 root root 43 Dec 17 2017 libvtkFiltersParallelFlowPaths-6.3.so.6.3 -> libvtkFiltersParallelFlowPaths-6.3.so.6.3.0 -rw-r--r-- 1 root root 232K Dec 17 2017 libvtkFiltersParallelFlowPaths-6.3.so.6.3.0 lrwxrwxrwx 1 root root 52 Dec 17 2017 libvtkFiltersParallelFlowPathsPython27D-6.3.so.6.3 -> libvtkFiltersParallelFlowPathsPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 40K Dec 17 2017 libvtkFiltersParallelFlowPathsPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkFiltersParallelFlowPathsTCL-6.3.so.6.3 -> libvtkFiltersParallelFlowPathsTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 43K Dec 17 2017 libvtkFiltersParallelFlowPathsTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkFiltersParallelGeometry-6.3.so.6.3 -> libvtkFiltersParallelGeometry-6.3.so.6.3.0 -rw-r--r-- 1 root root 265K Dec 17 2017 libvtkFiltersParallelGeometry-6.3.so.6.3.0 lrwxrwxrwx 1 root root 51 Dec 17 2017 libvtkFiltersParallelGeometryPython27D-6.3.so.6.3 -> libvtkFiltersParallelGeometryPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 65K Dec 17 2017 libvtkFiltersParallelGeometryPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkFiltersParallelGeometryTCL-6.3.so.6.3 -> libvtkFiltersParallelGeometryTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 55K Dec 17 2017 libvtkFiltersParallelGeometryTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkFiltersParallelImaging-6.3.so.6.3 -> libvtkFiltersParallelImaging-6.3.so.6.3.0 -rw-r--r-- 1 root root 140K Dec 17 2017 libvtkFiltersParallelImaging-6.3.so.6.3.0 lrwxrwxrwx 1 root root 50 Dec 17 2017 libvtkFiltersParallelImagingPython27D-6.3.so.6.3 -> libvtkFiltersParallelImagingPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 45K Dec 17 2017 libvtkFiltersParallelImagingPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkFiltersParallelImagingTCL-6.3.so.6.3 -> libvtkFiltersParallelImagingTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 47K Dec 17 2017 libvtkFiltersParallelImagingTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkFiltersParallelMPI-6.3.so.6.3 -> libvtkFiltersParallelMPI-6.3.so.6.3.0 -rw-r--r-- 1 root root 237K Dec 17 2017 libvtkFiltersParallelMPI-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkFiltersParallelMPIPython27D-6.3.so.6.3 -> libvtkFiltersParallelMPIPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 94K Dec 17 2017 libvtkFiltersParallelMPIPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkFiltersParallelMPITCL-6.3.so.6.3 -> libvtkFiltersParallelMPITCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 67K Dec 17 2017 libvtkFiltersParallelMPITCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 43 Dec 17 2017 libvtkFiltersParallelPython27D-6.3.so.6.3 -> libvtkFiltersParallelPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 484K Dec 17 2017 libvtkFiltersParallelPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkFiltersParallelStatistics-6.3.so.6.3 -> libvtkFiltersParallelStatistics-6.3.so.6.3.0 -rw-r--r-- 1 root root 220K Dec 17 2017 libvtkFiltersParallelStatistics-6.3.so.6.3.0 lrwxrwxrwx 1 root root 53 Dec 17 2017 libvtkFiltersParallelStatisticsPython27D-6.3.so.6.3 -> libvtkFiltersParallelStatisticsPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 74K Dec 17 2017 libvtkFiltersParallelStatisticsPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 47 Dec 17 2017 libvtkFiltersParallelStatisticsTCL-6.3.so.6.3 -> libvtkFiltersParallelStatisticsTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 71K Dec 17 2017 
libvtkFiltersParallelStatisticsTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkFiltersParallelTCL-6.3.so.6.3 -> libvtkFiltersParallelTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 380K Dec 17 2017 libvtkFiltersParallelTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkFiltersProgrammable-6.3.so.6.3 -> libvtkFiltersProgrammable-6.3.so.6.3.0 -rw-r--r-- 1 root root 63K Dec 17 2017 libvtkFiltersProgrammable-6.3.so.6.3.0 lrwxrwxrwx 1 root root 47 Dec 17 2017 libvtkFiltersProgrammablePython27D-6.3.so.6.3 -> libvtkFiltersProgrammablePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 61K Dec 17 2017 libvtkFiltersProgrammablePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkFiltersProgrammableTCL-6.3.so.6.3 -> libvtkFiltersProgrammableTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 47K Dec 17 2017 libvtkFiltersProgrammableTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkFiltersPython-6.3.so.6.3 -> libvtkFiltersPython-6.3.so.6.3.0 -rw-r--r-- 1 root root 35K Dec 17 2017 libvtkFiltersPython-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkFiltersPythonPython27D-6.3.so.6.3 -> libvtkFiltersPythonPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkFiltersPythonPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkFiltersReebGraph-6.3.so.6.3 -> libvtkFiltersReebGraph-6.3.so.6.3.0 -rw-r--r-- 1 root root 108K Dec 17 2017 libvtkFiltersReebGraph-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkFiltersReebGraphPython27D-6.3.so.6.3 -> libvtkFiltersReebGraphPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 73K Dec 17 2017 libvtkFiltersReebGraphPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkFiltersReebGraphTCL-6.3.so.6.3 -> libvtkFiltersReebGraphTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 63K Dec 17 2017 libvtkFiltersReebGraphTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkFiltersSMP-6.3.so.6.3 -> libvtkFiltersSMP-6.3.so.6.3.0 -rw-r--r-- 1 root root 407K Dec 17 2017 libvtkFiltersSMP-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkFiltersSMPPython27D-6.3.so.6.3 -> libvtkFiltersSMPPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 110K Dec 17 2017 libvtkFiltersSMPPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkFiltersSMPTCL-6.3.so.6.3 -> libvtkFiltersSMPTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 87K Dec 17 2017 libvtkFiltersSMPTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkFiltersSelection-6.3.so.6.3 -> libvtkFiltersSelection-6.3.so.6.3.0 -rw-r--r-- 1 root root 100K Dec 17 2017 libvtkFiltersSelection-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkFiltersSelectionPython27D-6.3.so.6.3 -> libvtkFiltersSelectionPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 109K Dec 17 2017 libvtkFiltersSelectionPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkFiltersSelectionTCL-6.3.so.6.3 -> libvtkFiltersSelectionTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 83K Dec 17 2017 libvtkFiltersSelectionTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkFiltersSources-6.3.so.6.3 -> libvtkFiltersSources-6.3.so.6.3.0 -rw-r--r-- 1 root root 773K Dec 17 2017 libvtkFiltersSources-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkFiltersSourcesPython27D-6.3.so.6.3 -> libvtkFiltersSourcesPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.2M Dec 17 2017 libvtkFiltersSourcesPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkFiltersSourcesTCL-6.3.so.6.3 -> libvtkFiltersSourcesTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 760K Dec 17 2017 libvtkFiltersSourcesTCL-6.3.so.6.3.0 
lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkFiltersStatistics-6.3.so.6.3 -> libvtkFiltersStatistics-6.3.so.6.3.0 -rw-r--r-- 1 root root 549K Dec 17 2017 libvtkFiltersStatistics-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkFiltersStatisticsPython27D-6.3.so.6.3 -> libvtkFiltersStatisticsPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 341K Dec 17 2017 libvtkFiltersStatisticsPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkFiltersStatisticsTCL-6.3.so.6.3 -> libvtkFiltersStatisticsTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 224K Dec 17 2017 libvtkFiltersStatisticsTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkFiltersTexture-6.3.so.6.3 -> libvtkFiltersTexture-6.3.so.6.3.0 -rw-r--r-- 1 root root 151K Dec 17 2017 libvtkFiltersTexture-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkFiltersTexturePython27D-6.3.so.6.3 -> libvtkFiltersTexturePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 219K Dec 17 2017 libvtkFiltersTexturePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkFiltersTextureTCL-6.3.so.6.3 -> libvtkFiltersTextureTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 167K Dec 17 2017 libvtkFiltersTextureTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkFiltersVerdict-6.3.so.6.3 -> libvtkFiltersVerdict-6.3.so.6.3.0 -rw-r--r-- 1 root root 124K Dec 17 2017 libvtkFiltersVerdict-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkFiltersVerdictPython27D-6.3.so.6.3 -> libvtkFiltersVerdictPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 267K Dec 17 2017 libvtkFiltersVerdictPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkFiltersVerdictTCL-6.3.so.6.3 -> libvtkFiltersVerdictTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 156K Dec 17 2017 libvtkFiltersVerdictTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkGeovisCore-6.3.so.6.3 -> libvtkGeovisCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 707K Dec 17 2017 libvtkGeovisCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkGeovisCorePython27D-6.3.so.6.3 -> libvtkGeovisCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 555K Dec 17 2017 libvtkGeovisCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkGeovisCoreTCL-6.3.so.6.3 -> libvtkGeovisCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 392K Dec 17 2017 libvtkGeovisCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 24 Dec 17 2017 libvtkIOAMR-6.3.so.6.3 -> libvtkIOAMR-6.3.so.6.3.0 -rw-r--r-- 1 root root 282K Dec 17 2017 libvtkIOAMR-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkIOAMRPython27D-6.3.so.6.3 -> libvtkIOAMRPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 107K Dec 17 2017 libvtkIOAMRPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOAMRTCL-6.3.so.6.3 -> libvtkIOAMRTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 91K Dec 17 2017 libvtkIOAMRTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkIOCore-6.3.so.6.3 -> libvtkIOCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 458K Dec 17 2017 libvtkIOCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkIOCorePython27D-6.3.so.6.3 -> libvtkIOCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 322K Dec 17 2017 libvtkIOCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 28 Dec 17 2017 libvtkIOCoreTCL-6.3.so.6.3 -> libvtkIOCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 240K Dec 17 2017 libvtkIOCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 28 Dec 17 2017 libvtkIOEnSight-6.3.so.6.3 -> libvtkIOEnSight-6.3.so.6.3.0 -rw-r--r-- 1 root root 549K Dec 17 2017 libvtkIOEnSight-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkIOEnSightPython27D-6.3.so.6.3 -> 
libvtkIOEnSightPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 141K Dec 17 2017 libvtkIOEnSightPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkIOEnSightTCL-6.3.so.6.3 -> libvtkIOEnSightTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 95K Dec 17 2017 libvtkIOEnSightTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOExodus-6.3.so.6.3 -> libvtkIOExodus-6.3.so.6.3.0 -rw-r--r-- 1 root root 660K Dec 17 2017 libvtkIOExodus-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkIOExodusPython27D-6.3.so.6.3 -> libvtkIOExodusPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 385K Dec 17 2017 libvtkIOExodusPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkIOExodusTCL-6.3.so.6.3 -> libvtkIOExodusTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 220K Dec 17 2017 libvtkIOExodusTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOExport-6.3.so.6.3 -> libvtkIOExport-6.3.so.6.3.0 -rw-r--r-- 1 root root 513K Dec 17 2017 libvtkIOExport-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkIOExportPython27D-6.3.so.6.3 -> libvtkIOExportPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 288K Dec 17 2017 libvtkIOExportPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkIOExportTCL-6.3.so.6.3 -> libvtkIOExportTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 183K Dec 17 2017 libvtkIOExportTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOFFMPEG-6.3.so.6.3 -> libvtkIOFFMPEG-6.3.so.6.3.0 -rw-r--r-- 1 root root 55K Dec 17 2017 libvtkIOFFMPEG-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkIOFFMPEGPython27D-6.3.so.6.3 -> libvtkIOFFMPEGPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 44K Dec 17 2017 libvtkIOFFMPEGPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkIOFFMPEGTCL-6.3.so.6.3 -> libvtkIOFFMPEGTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 31K Dec 17 2017 libvtkIOFFMPEGTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkIOGDAL-6.3.so.6.3 -> libvtkIOGDAL-6.3.so.6.3.0 -rw-r--r-- 1 root root 152K Dec 17 2017 libvtkIOGDAL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkIOGDALPython27D-6.3.so.6.3 -> libvtkIOGDALPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 60K Dec 17 2017 libvtkIOGDALPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 28 Dec 17 2017 libvtkIOGDALTCL-6.3.so.6.3 -> libvtkIOGDALTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 47K Dec 17 2017 libvtkIOGDALTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 28 Dec 17 2017 libvtkIOGeoJSON-6.3.so.6.3 -> libvtkIOGeoJSON-6.3.so.6.3.0 -rw-r--r-- 1 root root 63K Dec 17 2017 libvtkIOGeoJSON-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkIOGeoJSONPython27D-6.3.so.6.3 -> libvtkIOGeoJSONPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 40K Dec 17 2017 libvtkIOGeoJSONPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkIOGeoJSONTCL-6.3.so.6.3 -> libvtkIOGeoJSONTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 35K Dec 17 2017 libvtkIOGeoJSONTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkIOGeometry-6.3.so.6.3 -> libvtkIOGeometry-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.1M Dec 17 2017 libvtkIOGeometry-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkIOGeometryPython27D-6.3.so.6.3 -> libvtkIOGeometryPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 621K Dec 17 2017 libvtkIOGeometryPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkIOGeometryTCL-6.3.so.6.3 -> libvtkIOGeometryTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 400K Dec 17 2017 libvtkIOGeometryTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 26 Dec 17 2017 libvtkIOImage-6.3.so.6.3 -> libvtkIOImage-6.3.so.6.3.0 -rw-r--r-- 1 root 
root 1.6M Dec 17 2017 libvtkIOImage-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOImagePython27D-6.3.so.6.3 -> libvtkIOImagePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 900K Dec 17 2017 libvtkIOImagePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkIOImageTCL-6.3.so.6.3 -> libvtkIOImageTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 540K Dec 17 2017 libvtkIOImageTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOImport-6.3.so.6.3 -> libvtkIOImport-6.3.so.6.3.0 -rw-r--r-- 1 root root 276K Dec 17 2017 libvtkIOImport-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkIOImportPython27D-6.3.so.6.3 -> libvtkIOImportPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 57K Dec 17 2017 libvtkIOImportPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkIOImportTCL-6.3.so.6.3 -> libvtkIOImportTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 47K Dec 17 2017 libvtkIOImportTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 28 Dec 17 2017 libvtkIOInfovis-6.3.so.6.3 -> libvtkIOInfovis-6.3.so.6.3.0 -rw-r--r-- 1 root root 562K Dec 17 2017 libvtkIOInfovis-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkIOInfovisPython27D-6.3.so.6.3 -> libvtkIOInfovisPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 320K Dec 17 2017 libvtkIOInfovisPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkIOInfovisTCL-6.3.so.6.3 -> libvtkIOInfovisTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 212K Dec 17 2017 libvtkIOInfovisTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOLSDyna-6.3.so.6.3 -> libvtkIOLSDyna-6.3.so.6.3.0 -rw-r--r-- 1 root root 313K Dec 17 2017 libvtkIOLSDyna-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkIOLSDynaPython27D-6.3.so.6.3 -> libvtkIOLSDynaPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 130K Dec 17 2017 libvtkIOLSDynaPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkIOLSDynaTCL-6.3.so.6.3 -> libvtkIOLSDynaTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 71K Dec 17 2017 libvtkIOLSDynaTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOLegacy-6.3.so.6.3 -> libvtkIOLegacy-6.3.so.6.3.0 -rw-r--r-- 1 root root 655K Dec 17 2017 libvtkIOLegacy-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkIOLegacyPython27D-6.3.so.6.3 -> libvtkIOLegacyPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 354K Dec 17 2017 libvtkIOLegacyPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkIOLegacyTCL-6.3.so.6.3 -> libvtkIOLegacyTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 240K Dec 17 2017 libvtkIOLegacyTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkIOMINC-6.3.so.6.3 -> libvtkIOMINC-6.3.so.6.3.0 -rw-r--r-- 1 root root 450K Dec 17 2017 libvtkIOMINC-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkIOMINCPython27D-6.3.so.6.3 -> libvtkIOMINCPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 214K Dec 17 2017 libvtkIOMINCPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 28 Dec 17 2017 libvtkIOMINCTCL-6.3.so.6.3 -> libvtkIOMINCTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 151K Dec 17 2017 libvtkIOMINCTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkIOMPIImage-6.3.so.6.3 -> libvtkIOMPIImage-6.3.so.6.3.0 -rw-r--r-- 1 root root 161K Dec 17 2017 libvtkIOMPIImage-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkIOMPIImagePython27D-6.3.so.6.3 -> libvtkIOMPIImagePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 28K Dec 17 2017 libvtkIOMPIImagePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkIOMPIImageTCL-6.3.so.6.3 -> libvtkIOMPIImageTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 23K Dec 17 2017 
libvtkIOMPIImageTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkIOMPIParallel-6.3.so.6.3 -> libvtkIOMPIParallel-6.3.so.6.3.0 -rw-r--r-- 1 root root 217K Dec 17 2017 libvtkIOMPIParallel-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkIOMPIParallelPython27D-6.3.so.6.3 -> libvtkIOMPIParallelPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 27K Dec 17 2017 libvtkIOMPIParallelPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOMPIParallelTCL-6.3.so.6.3 -> libvtkIOMPIParallelTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 27K Dec 17 2017 libvtkIOMPIParallelTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 26 Dec 17 2017 libvtkIOMovie-6.3.so.6.3 -> libvtkIOMovie-6.3.so.6.3.0 -rw-r--r-- 1 root root 59K Dec 17 2017 libvtkIOMovie-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOMoviePython27D-6.3.so.6.3 -> libvtkIOMoviePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 49K Dec 17 2017 libvtkIOMoviePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkIOMovieTCL-6.3.so.6.3 -> libvtkIOMovieTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 39K Dec 17 2017 libvtkIOMovieTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 26 Dec 17 2017 libvtkIOMySQL-6.3.so.6.3 -> libvtkIOMySQL-6.3.so.6.3.0 -rw-r--r-- 1 root root 144K Dec 17 2017 libvtkIOMySQL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOMySQLPython27D-6.3.so.6.3 -> libvtkIOMySQLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 86K Dec 17 2017 libvtkIOMySQLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkIOMySQLTCL-6.3.so.6.3 -> libvtkIOMySQLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 63K Dec 17 2017 libvtkIOMySQLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIONetCDF-6.3.so.6.3 -> libvtkIONetCDF-6.3.so.6.3.0 -rw-r--r-- 1 root root 410K Dec 17 2017 libvtkIONetCDF-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkIONetCDFPython27D-6.3.so.6.3 -> libvtkIONetCDFPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 201K Dec 17 2017 libvtkIONetCDFPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkIONetCDFTCL-6.3.so.6.3 -> libvtkIONetCDFTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 139K Dec 17 2017 libvtkIONetCDFTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkIOODBC-6.3.so.6.3 -> libvtkIOODBC-6.3.so.6.3.0 -rw-r--r-- 1 root root 156K Dec 17 2017 libvtkIOODBC-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkIOODBCPython27D-6.3.so.6.3 -> libvtkIOODBCPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 65K Dec 17 2017 libvtkIOODBCPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 28 Dec 17 2017 libvtkIOODBCTCL-6.3.so.6.3 -> libvtkIOODBCTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 47K Dec 17 2017 libvtkIOODBCTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 24 Dec 17 2017 libvtkIOPLY-6.3.so.6.3 -> libvtkIOPLY-6.3.so.6.3.0 -rw-r--r-- 1 root root 97K Dec 17 2017 libvtkIOPLY-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkIOPLYPython27D-6.3.so.6.3 -> libvtkIOPLYPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 73K Dec 17 2017 libvtkIOPLYPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOPLYTCL-6.3.so.6.3 -> libvtkIOPLYTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 43K Dec 17 2017 libvtkIOPLYTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkIOParallel-6.3.so.6.3 -> libvtkIOParallel-6.3.so.6.3.0 -rw-r--r-- 1 root root 526K Dec 17 2017 libvtkIOParallel-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOParallelExodus-6.3.so.6.3 -> libvtkIOParallelExodus-6.3.so.6.3.0 -rw-r--r-- 1 root root 148K Dec 17 2017 libvtkIOParallelExodus-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 
libvtkIOParallelExodusPython27D-6.3.so.6.3 -> libvtkIOParallelExodusPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 48K Dec 17 2017 libvtkIOParallelExodusPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkIOParallelExodusTCL-6.3.so.6.3 -> libvtkIOParallelExodusTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 35K Dec 17 2017 libvtkIOParallelExodusTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOParallelLSDyna-6.3.so.6.3 -> libvtkIOParallelLSDyna-6.3.so.6.3.0 -rw-r--r-- 1 root root 55K Dec 17 2017 libvtkIOParallelLSDyna-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkIOParallelLSDynaPython27D-6.3.so.6.3 -> libvtkIOParallelLSDynaPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 19K Dec 17 2017 libvtkIOParallelLSDynaPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkIOParallelLSDynaTCL-6.3.so.6.3 -> libvtkIOParallelLSDynaTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 19K Dec 17 2017 libvtkIOParallelLSDynaTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOParallelNetCDF-6.3.so.6.3 -> libvtkIOParallelNetCDF-6.3.so.6.3.0 -rw-r--r-- 1 root root 113K Dec 17 2017 libvtkIOParallelNetCDF-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkIOParallelNetCDFPython27D-6.3.so.6.3 -> libvtkIOParallelNetCDFPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 32K Dec 17 2017 libvtkIOParallelNetCDFPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkIOParallelNetCDFTCL-6.3.so.6.3 -> libvtkIOParallelNetCDFTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 27K Dec 17 2017 libvtkIOParallelNetCDFTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkIOParallelPython27D-6.3.so.6.3 -> libvtkIOParallelPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 221K Dec 17 2017 libvtkIOParallelPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkIOParallelTCL-6.3.so.6.3 -> libvtkIOParallelTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 139K Dec 17 2017 libvtkIOParallelTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkIOParallelXML-6.3.so.6.3 -> libvtkIOParallelXML-6.3.so.6.3.0 -rw-r--r-- 1 root root 204K Dec 17 2017 libvtkIOParallelXML-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkIOParallelXMLPython27D-6.3.so.6.3 -> libvtkIOParallelXMLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 95K Dec 17 2017 libvtkIOParallelXMLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOParallelXMLTCL-6.3.so.6.3 -> libvtkIOParallelXMLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 87K Dec 17 2017 libvtkIOParallelXMLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkIOPostgreSQL-6.3.so.6.3 -> libvtkIOPostgreSQL-6.3.so.6.3.0 -rw-r--r-- 1 root root 160K Dec 17 2017 libvtkIOPostgreSQL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkIOPostgreSQLPython27D-6.3.so.6.3 -> libvtkIOPostgreSQLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 73K Dec 17 2017 libvtkIOPostgreSQLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkIOPostgreSQLTCL-6.3.so.6.3 -> libvtkIOPostgreSQLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 59K Dec 17 2017 libvtkIOPostgreSQLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 24 Dec 17 2017 libvtkIOSQL-6.3.so.6.3 -> libvtkIOSQL-6.3.so.6.3.0 -rw-r--r-- 1 root root 277K Dec 17 2017 libvtkIOSQL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkIOSQLPython27D-6.3.so.6.3 -> libvtkIOSQLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 202K Dec 17 2017 libvtkIOSQLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOSQLTCL-6.3.so.6.3 -> libvtkIOSQLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 148K Dec 17 2017 
libvtkIOSQLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkIOVPIC-6.3.so.6.3 -> libvtkIOVPIC-6.3.so.6.3.0 -rw-r--r-- 1 root root 63K Dec 17 2017 libvtkIOVPIC-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkIOVPICPython27D-6.3.so.6.3 -> libvtkIOVPICPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 44K Dec 17 2017 libvtkIOVPICPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 28 Dec 17 2017 libvtkIOVPICTCL-6.3.so.6.3 -> libvtkIOVPICTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 39K Dec 17 2017 libvtkIOVPICTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 26 Dec 17 2017 libvtkIOVideo-6.3.so.6.3 -> libvtkIOVideo-6.3.so.6.3.0 -rw-r--r-- 1 root root 71K Dec 17 2017 libvtkIOVideo-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOVideoPython27D-6.3.so.6.3 -> libvtkIOVideoPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 85K Dec 17 2017 libvtkIOVideoPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkIOVideoTCL-6.3.so.6.3 -> libvtkIOVideoTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 43K Dec 17 2017 libvtkIOVideoTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 24 Dec 17 2017 libvtkIOXML-6.3.so.6.3 -> libvtkIOXML-6.3.so.6.3.0 -rw-r--r-- 1 root root 945K Dec 17 2017 libvtkIOXML-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkIOXMLParser-6.3.so.6.3 -> libvtkIOXMLParser-6.3.so.6.3.0 -rw-r--r-- 1 root root 112K Dec 17 2017 libvtkIOXMLParser-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkIOXMLParserPython27D-6.3.so.6.3 -> libvtkIOXMLParserPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 69K Dec 17 2017 libvtkIOXMLParserPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkIOXMLParserTCL-6.3.so.6.3 -> libvtkIOXMLParserTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 51K Dec 17 2017 libvtkIOXMLParserTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkIOXMLPython27D-6.3.so.6.3 -> libvtkIOXMLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 375K Dec 17 2017 libvtkIOXMLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOXMLTCL-6.3.so.6.3 -> libvtkIOXMLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 304K Dec 17 2017 libvtkIOXMLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 26 Dec 17 2017 libvtkIOXdmf2-6.3.so.6.3 -> libvtkIOXdmf2-6.3.so.6.3.0 -rw-r--r-- 1 root root 335K Dec 17 2017 libvtkIOXdmf2-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOXdmf2Python27D-6.3.so.6.3 -> libvtkIOXdmf2Python27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 93K Dec 17 2017 libvtkIOXdmf2Python27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkIOXdmfIITCL-6.3.so.6.3 -> libvtkIOXdmfIITCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 67K Dec 17 2017 libvtkIOXdmfIITCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkImagingColor-6.3.so.6.3 -> libvtkImagingColor-6.3.so.6.3.0 -rw-r--r-- 1 root root 280K Dec 17 2017 libvtkImagingColor-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkImagingColorPython27D-6.3.so.6.3 -> libvtkImagingColorPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 85K Dec 17 2017 libvtkImagingColorPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkImagingColorTCL-6.3.so.6.3 -> libvtkImagingColorTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 75K Dec 17 2017 libvtkImagingColorTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkImagingCore-6.3.so.6.3 -> libvtkImagingCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.9M Dec 17 2017 libvtkImagingCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkImagingCorePython27D-6.3.so.6.3 -> libvtkImagingCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 934K Dec 17 2017 libvtkImagingCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 
root root 33 Dec 17 2017 libvtkImagingCoreTCL-6.3.so.6.3 -> libvtkImagingCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 625K Dec 17 2017 libvtkImagingCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkImagingFourier-6.3.so.6.3 -> libvtkImagingFourier-6.3.so.6.3.0 -rw-r--r-- 1 root root 187K Dec 17 2017 libvtkImagingFourier-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkImagingFourierPython27D-6.3.so.6.3 -> libvtkImagingFourierPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 103K Dec 17 2017 libvtkImagingFourierPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkImagingFourierTCL-6.3.so.6.3 -> libvtkImagingFourierTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 87K Dec 17 2017 libvtkImagingFourierTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkImagingGeneral-6.3.so.6.3 -> libvtkImagingGeneral-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.1M Dec 17 2017 libvtkImagingGeneral-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkImagingGeneralPython27D-6.3.so.6.3 -> libvtkImagingGeneralPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 378K Dec 17 2017 libvtkImagingGeneralPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkImagingGeneralTCL-6.3.so.6.3 -> libvtkImagingGeneralTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 348K Dec 17 2017 libvtkImagingGeneralTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkImagingHybrid-6.3.so.6.3 -> libvtkImagingHybrid-6.3.so.6.3.0 -rw-r--r-- 1 root root 505K Dec 17 2017 libvtkImagingHybrid-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkImagingHybridPython27D-6.3.so.6.3 -> libvtkImagingHybridPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 539K Dec 17 2017 libvtkImagingHybridPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkImagingHybridTCL-6.3.so.6.3 -> libvtkImagingHybridTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 315K Dec 17 2017 libvtkImagingHybridTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkImagingMath-6.3.so.6.3 -> libvtkImagingMath-6.3.so.6.3.0 -rw-r--r-- 1 root root 296K Dec 17 2017 libvtkImagingMath-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkImagingMathPython27D-6.3.so.6.3 -> libvtkImagingMathPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 143K Dec 17 2017 libvtkImagingMathPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkImagingMathTCL-6.3.so.6.3 -> libvtkImagingMathTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 99K Dec 17 2017 libvtkImagingMathTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkImagingMorphological-6.3.so.6.3 -> libvtkImagingMorphological-6.3.so.6.3.0 -rw-r--r-- 1 root root 536K Dec 17 2017 libvtkImagingMorphological-6.3.so.6.3.0 lrwxrwxrwx 1 root root 48 Dec 17 2017 libvtkImagingMorphologicalPython27D-6.3.so.6.3 -> libvtkImagingMorphologicalPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 205K Dec 17 2017 libvtkImagingMorphologicalPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkImagingMorphologicalTCL-6.3.so.6.3 -> libvtkImagingMorphologicalTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 143K Dec 17 2017 libvtkImagingMorphologicalTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkImagingSources-6.3.so.6.3 -> libvtkImagingSources-6.3.so.6.3.0 -rw-r--r-- 1 root root 331K Dec 17 2017 libvtkImagingSources-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkImagingSourcesPython27D-6.3.so.6.3 -> libvtkImagingSourcesPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 253K Dec 17 2017 libvtkImagingSourcesPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkImagingSourcesTCL-6.3.so.6.3 -> 
libvtkImagingSourcesTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 183K Dec 17 2017 libvtkImagingSourcesTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkImagingStatistics-6.3.so.6.3 -> libvtkImagingStatistics-6.3.so.6.3.0 -rw-r--r-- 1 root root 156K Dec 17 2017 libvtkImagingStatistics-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkImagingStatisticsPython27D-6.3.so.6.3 -> libvtkImagingStatisticsPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 138K Dec 17 2017 libvtkImagingStatisticsPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkImagingStatisticsTCL-6.3.so.6.3 -> libvtkImagingStatisticsTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 99K Dec 17 2017 libvtkImagingStatisticsTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkImagingStencil-6.3.so.6.3 -> libvtkImagingStencil-6.3.so.6.3.0 -rw-r--r-- 1 root root 212K Dec 17 2017 libvtkImagingStencil-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkImagingStencilPython27D-6.3.so.6.3 -> libvtkImagingStencilPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 147K Dec 17 2017 libvtkImagingStencilPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkImagingStencilTCL-6.3.so.6.3 -> libvtkImagingStencilTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 107K Dec 17 2017 libvtkImagingStencilTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkInfovisBoostGraphAlgorithms-6.3.so.6.3 -> libvtkInfovisBoostGraphAlgorithms-6.3.so.6.3.0 -rw-r--r-- 1 root root 477K Dec 17 2017 libvtkInfovisBoostGraphAlgorithms-6.3.so.6.3.0 lrwxrwxrwx 1 root root 55 Dec 17 2017 libvtkInfovisBoostGraphAlgorithmsPython27D-6.3.so.6.3 -> libvtkInfovisBoostGraphAlgorithmsPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 205K Dec 17 2017 libvtkInfovisBoostGraphAlgorithmsPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 49 Dec 17 2017 libvtkInfovisBoostGraphAlgorithmsTCL-6.3.so.6.3 -> libvtkInfovisBoostGraphAlgorithmsTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 163K Dec 17 2017 libvtkInfovisBoostGraphAlgorithmsTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkInfovisCore-6.3.so.6.3 -> libvtkInfovisCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 1008K Dec 17 2017 libvtkInfovisCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkInfovisCorePython27D-6.3.so.6.3 -> libvtkInfovisCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 752K Dec 17 2017 libvtkInfovisCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkInfovisCoreTCL-6.3.so.6.3 -> libvtkInfovisCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 536K Dec 17 2017 libvtkInfovisCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkInfovisLayout-6.3.so.6.3 -> libvtkInfovisLayout-6.3.so.6.3.0 -rw-r--r-- 1 root root 670K Dec 17 2017 libvtkInfovisLayout-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkInfovisLayoutPython27D-6.3.so.6.3 -> libvtkInfovisLayoutPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 897K Dec 17 2017 libvtkInfovisLayoutPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkInfovisLayoutTCL-6.3.so.6.3 -> libvtkInfovisLayoutTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 600K Dec 17 2017 libvtkInfovisLayoutTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkInteractionImage-6.3.so.6.3 -> libvtkInteractionImage-6.3.so.6.3.0 -rw-r--r-- 1 root root 152K Dec 17 2017 libvtkInteractionImage-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkInteractionImagePython27D-6.3.so.6.3 -> libvtkInteractionImagePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 144K Dec 17 2017 libvtkInteractionImagePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 
2017 libvtkInteractionImageTCL-6.3.so.6.3 -> libvtkInteractionImageTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 91K Dec 17 2017 libvtkInteractionImageTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkInteractionStyle-6.3.so.6.3 -> libvtkInteractionStyle-6.3.so.6.3.0 -rw-r--r-- 1 root root 537K Dec 17 2017 libvtkInteractionStyle-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkInteractionStylePython27D-6.3.so.6.3 -> libvtkInteractionStylePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 349K Dec 17 2017 libvtkInteractionStylePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkInteractionStyleTCL-6.3.so.6.3 -> libvtkInteractionStyleTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 219K Dec 17 2017 libvtkInteractionStyleTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkInteractionWidgets-6.3.so.6.3 -> libvtkInteractionWidgets-6.3.so.6.3.0 -rw-r--r-- 1 root root 3.2M Dec 17 2017 libvtkInteractionWidgets-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkInteractionWidgetsPython27D-6.3.so.6.3 -> libvtkInteractionWidgetsPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 3.6M Dec 17 2017 libvtkInteractionWidgetsPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkInteractionWidgetsTCL-6.3.so.6.3 -> libvtkInteractionWidgetsTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 2.1M Dec 17 2017 libvtkInteractionWidgetsTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkLocalExample-6.3.so.6.3 -> libvtkLocalExample-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkLocalExample-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkLocalExampleTCL-6.3.so.6.3 -> libvtkLocalExampleTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkLocalExampleTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkParallelCore-6.3.so.6.3 -> libvtkParallelCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 305K Dec 17 2017 libvtkParallelCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkParallelCorePython27D-6.3.so.6.3 -> libvtkParallelCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 376K Dec 17 2017 libvtkParallelCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkParallelCoreTCL-6.3.so.6.3 -> libvtkParallelCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 152K Dec 17 2017 libvtkParallelCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkParallelMPI-6.3.so.6.3 -> libvtkParallelMPI-6.3.so.6.3.0 -rw-r--r-- 1 root root 149K Dec 17 2017 libvtkParallelMPI-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkParallelMPI4Py-6.3.so.6.3 -> libvtkParallelMPI4Py-6.3.so.6.3.0 -rw-r--r-- 1 root root 68K Dec 17 2017 libvtkParallelMPI4Py-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkParallelMPI4PyPython27D-6.3.so.6.3 -> libvtkParallelMPI4PyPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkParallelMPI4PyPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkParallelMPIPython27D-6.3.so.6.3 -> libvtkParallelMPIPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 73K Dec 17 2017 libvtkParallelMPIPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkParallelMPITCL-6.3.so.6.3 -> libvtkParallelMPITCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 35K Dec 17 2017 libvtkParallelMPITCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkPythonInterpreter-6.3.so.6.3 -> libvtkPythonInterpreter-6.3.so.6.3.0 -rw-r--r-- 1 root root 40K Dec 17 2017 libvtkPythonInterpreter-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkPythonInterpreterPython27D-6.3.so.6.3 -> libvtkPythonInterpreterPython27D-6.3.so.6.3.0 
-rw-r--r-- 1 root root 28K Dec 17 2017 libvtkPythonInterpreterPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkPythonInterpreterTCL-6.3.so.6.3 -> libvtkPythonInterpreterTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 27K Dec 17 2017 libvtkPythonInterpreterTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkRenderingAnnotation-6.3.so.6.3 -> libvtkRenderingAnnotation-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.3M Dec 17 2017 libvtkRenderingAnnotation-6.3.so.6.3.0 lrwxrwxrwx 1 root root 47 Dec 17 2017 libvtkRenderingAnnotationPython27D-6.3.so.6.3 -> libvtkRenderingAnnotationPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.9M Dec 17 2017 libvtkRenderingAnnotationPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkRenderingAnnotationTCL-6.3.so.6.3 -> libvtkRenderingAnnotationTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 945K Dec 17 2017 libvtkRenderingAnnotationTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkRenderingContext2D-6.3.so.6.3 -> libvtkRenderingContext2D-6.3.so.6.3.0 -rw-r--r-- 1 root root 252K Dec 17 2017 libvtkRenderingContext2D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkRenderingContext2DPython27D-6.3.so.6.3 -> libvtkRenderingContext2DPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 320K Dec 17 2017 libvtkRenderingContext2DPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkRenderingContext2DTCL-6.3.so.6.3 -> libvtkRenderingContext2DTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 212K Dec 17 2017 libvtkRenderingContext2DTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkRenderingContextOpenGL-6.3.so.6.3 -> libvtkRenderingContextOpenGL-6.3.so.6.3.0 -rw-r--r-- 1 root root 196K Dec 17 2017 libvtkRenderingContextOpenGL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 50 Dec 17 2017 libvtkRenderingContextOpenGLPython27D-6.3.so.6.3 -> libvtkRenderingContextOpenGLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkRenderingContextOpenGLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkRenderingContextOpenGLTCL-6.3.so.6.3 -> libvtkRenderingContextOpenGLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkRenderingContextOpenGLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkRenderingCore-6.3.so.6.3 -> libvtkRenderingCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 2.5M Dec 17 2017 libvtkRenderingCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkRenderingCorePython27D-6.3.so.6.3 -> libvtkRenderingCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 3.6M Dec 17 2017 libvtkRenderingCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkRenderingCoreTCL-6.3.so.6.3 -> libvtkRenderingCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 2.2M Dec 17 2017 libvtkRenderingCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkRenderingExternal-6.3.so.6.3 -> libvtkRenderingExternal-6.3.so.6.3.0 -rw-r--r-- 1 root root 236K Dec 17 2017 libvtkRenderingExternal-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkRenderingExternalPython27D-6.3.so.6.3 -> libvtkRenderingExternalPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 86K Dec 17 2017 libvtkRenderingExternalPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkRenderingExternalTCL-6.3.so.6.3 -> libvtkRenderingExternalTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 71K Dec 17 2017 libvtkRenderingExternalTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkRenderingFreeType-6.3.so.6.3 -> libvtkRenderingFreeType-6.3.so.6.3.0 -rw-r--r-- 1 root root 781K Dec 17 2017 libvtkRenderingFreeType-6.3.so.6.3.0 lrwxrwxrwx 1
root root 46 Dec 17 2017 libvtkRenderingFreeTypeFontConfig-6.3.so.6.3 -> libvtkRenderingFreeTypeFontConfig-6.3.so.6.3.0 -rw-r--r-- 1 root root 39K Dec 17 2017 libvtkRenderingFreeTypeFontConfig-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkRenderingFreeTypePython27D-6.3.so.6.3 -> libvtkRenderingFreeTypePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 61K Dec 17 2017 libvtkRenderingFreeTypePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkRenderingFreeTypeTCL-6.3.so.6.3 -> libvtkRenderingFreeTypeTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 47K Dec 17 2017 libvtkRenderingFreeTypeTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkRenderingGL2PS-6.3.so.6.3 -> libvtkRenderingGL2PS-6.3.so.6.3.0 -rw-r--r-- 1 root root 112K Dec 17 2017 libvtkRenderingGL2PS-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkRenderingGL2PSPython27D-6.3.so.6.3 -> libvtkRenderingGL2PSPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 23K Dec 17 2017 libvtkRenderingGL2PSPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkRenderingGL2PSTCL-6.3.so.6.3 -> libvtkRenderingGL2PSTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 19K Dec 17 2017 libvtkRenderingGL2PSTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkRenderingImage-6.3.so.6.3 -> libvtkRenderingImage-6.3.so.6.3.0 -rw-r--r-- 1 root root 136K Dec 17 2017 libvtkRenderingImage-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkRenderingImagePython27D-6.3.so.6.3 -> libvtkRenderingImagePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 110K Dec 17 2017 libvtkRenderingImagePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkRenderingImageTCL-6.3.so.6.3 -> libvtkRenderingImageTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 63K Dec 17 2017 libvtkRenderingImageTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkRenderingLIC-6.3.so.6.3 -> libvtkRenderingLIC-6.3.so.6.3.0 -rw-r--r-- 1 root root 647K Dec 17 2017 libvtkRenderingLIC-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkRenderingLICPython27D-6.3.so.6.3 -> libvtkRenderingLICPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 164K Dec 17 2017 libvtkRenderingLICPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkRenderingLICTCL-6.3.so.6.3 -> libvtkRenderingLICTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 99K Dec 17 2017 libvtkRenderingLICTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkRenderingLOD-6.3.so.6.3 -> libvtkRenderingLOD-6.3.so.6.3.0 -rw-r--r-- 1 root root 99K Dec 17 2017 libvtkRenderingLOD-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkRenderingLODPython27D-6.3.so.6.3 -> libvtkRenderingLODPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 106K Dec 17 2017 libvtkRenderingLODPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkRenderingLODTCL-6.3.so.6.3 -> libvtkRenderingLODTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 59K Dec 17 2017 libvtkRenderingLODTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkRenderingLabel-6.3.so.6.3 -> libvtkRenderingLabel-6.3.so.6.3.0 -rw-r--r-- 1 root root 689K Dec 17 2017 libvtkRenderingLabel-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkRenderingLabelPython27D-6.3.so.6.3 -> libvtkRenderingLabelPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 434K Dec 17 2017 libvtkRenderingLabelPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkRenderingLabelTCL-6.3.so.6.3 -> libvtkRenderingLabelTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 239K Dec 17 2017 libvtkRenderingLabelTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkRenderingMatplotlib-6.3.so.6.3 ->
libvtkRenderingMatplotlib-6.3.so.6.3.0 -rw-r--r-- 1 root root 67K Dec 17 2017 libvtkRenderingMatplotlib-6.3.so.6.3.0 lrwxrwxrwx 1 root root 47 Dec 17 2017 libvtkRenderingMatplotlibPython27D-6.3.so.6.3 -> libvtkRenderingMatplotlibPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 23K Dec 17 2017 libvtkRenderingMatplotlibPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkRenderingMatplotlibTCL-6.3.so.6.3 -> libvtkRenderingMatplotlibTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 23K Dec 17 2017 libvtkRenderingMatplotlibTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkRenderingOpenGL-6.3.so.6.3 -> libvtkRenderingOpenGL-6.3.so.6.3.0 -rw-r--r-- 1 root root 2.7M Dec 17 2017 libvtkRenderingOpenGL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 43 Dec 17 2017 libvtkRenderingOpenGLPython27D-6.3.so.6.3 -> libvtkRenderingOpenGLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 983K Dec 17 2017 libvtkRenderingOpenGLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkRenderingOpenGLTCL-6.3.so.6.3 -> libvtkRenderingOpenGLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 711K Dec 17 2017 libvtkRenderingOpenGLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkRenderingParallel-6.3.so.6.3 -> libvtkRenderingParallel-6.3.so.6.3.0 -rw-r--r-- 1 root root 341K Dec 17 2017 libvtkRenderingParallel-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkRenderingParallelLIC-6.3.so.6.3 -> libvtkRenderingParallelLIC-6.3.so.6.3.0 -rw-r--r-- 1 root root 621K Dec 17 2017 libvtkRenderingParallelLIC-6.3.so.6.3.0 lrwxrwxrwx 1 root root 48 Dec 17 2017 libvtkRenderingParallelLICPython27D-6.3.so.6.3 -> libvtkRenderingParallelLICPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 48K Dec 17 2017 libvtkRenderingParallelLICPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkRenderingParallelLICTCL-6.3.so.6.3 -> libvtkRenderingParallelLICTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 39K Dec 17 2017 libvtkRenderingParallelLICTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkRenderingParallelPython27D-6.3.so.6.3 -> libvtkRenderingParallelPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 284K Dec 17 2017 libvtkRenderingParallelPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkRenderingParallelTCL-6.3.so.6.3 -> libvtkRenderingParallelTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 179K Dec 17 2017 libvtkRenderingParallelTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkRenderingTkTCL-6.3.so.6.3 -> libvtkRenderingTkTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 104K Dec 17 2017 libvtkRenderingTkTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkRenderingVolume-6.3.so.6.3 -> libvtkRenderingVolume-6.3.so.6.3.0 -rw-r--r-- 1 root root 4.2M Dec 17 2017 libvtkRenderingVolume-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkRenderingVolumeAMR-6.3.so.6.3 -> libvtkRenderingVolumeAMR-6.3.so.6.3.0 -rw-r--r-- 1 root root 87K Dec 17 2017 libvtkRenderingVolumeAMR-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkRenderingVolumeAMRPython27D-6.3.so.6.3 -> libvtkRenderingVolumeAMRPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 69K Dec 17 2017 libvtkRenderingVolumeAMRPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkRenderingVolumeAMRTCL-6.3.so.6.3 -> libvtkRenderingVolumeAMRTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 39K Dec 17 2017 libvtkRenderingVolumeAMRTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkRenderingVolumeOpenGL-6.3.so.6.3 -> libvtkRenderingVolumeOpenGL-6.3.so.6.3.0 -rw-r--r-- 1 root root 670K Dec 17 2017 libvtkRenderingVolumeOpenGL-6.3.so.6.3.0 
lrwxrwxrwx 1 root root 49 Dec 17 2017 libvtkRenderingVolumeOpenGLPython27D-6.3.so.6.3 -> libvtkRenderingVolumeOpenGLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 127K Dec 17 2017 libvtkRenderingVolumeOpenGLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 43 Dec 17 2017 libvtkRenderingVolumeOpenGLTCL-6.3.so.6.3 -> libvtkRenderingVolumeOpenGLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 91K Dec 17 2017 libvtkRenderingVolumeOpenGLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 43 Dec 17 2017 libvtkRenderingVolumePython27D-6.3.so.6.3 -> libvtkRenderingVolumePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.1M Dec 17 2017 libvtkRenderingVolumePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkRenderingVolumeTCL-6.3.so.6.3 -> libvtkRenderingVolumeTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 620K Dec 17 2017 libvtkRenderingVolumeTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkTestingGenericBridge-6.3.so.6.3 -> libvtkTestingGenericBridge-6.3.so.6.3.0 -rw-r--r-- 1 root root 132K Dec 17 2017 libvtkTestingGenericBridge-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkTestingIOSQL-6.3.so.6.3 -> libvtkTestingIOSQL-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkTestingIOSQL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkTestingRendering-6.3.so.6.3 -> libvtkTestingRendering-6.3.so.6.3.0 -rw-r--r-- 1 root root 136K Dec 17 2017 libvtkTestingRendering-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkTestingRenderingPython27D-6.3.so.6.3 -> libvtkTestingRenderingPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 61K Dec 17 2017 libvtkTestingRenderingPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkTestingRenderingTCL-6.3.so.6.3 -> libvtkTestingRenderingTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 43K Dec 17 2017 libvtkTestingRenderingTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 23 Dec 17 2017 libvtkVPIC-6.3.so.6.3 -> libvtkVPIC-6.3.so.6.3.0 -rw-r--r-- 1 root root 95K Dec 17 2017 libvtkVPIC-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkViewsContext2D-6.3.so.6.3 -> libvtkViewsContext2D-6.3.so.6.3.0 -rw-r--r-- 1 root root 75K Dec 17 2017 libvtkViewsContext2D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkViewsContext2DPython27D-6.3.so.6.3 -> libvtkViewsContext2DPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 32K Dec 17 2017 libvtkViewsContext2DPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkViewsContext2DTCL-6.3.so.6.3 -> libvtkViewsContext2DTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 27K Dec 17 2017 libvtkViewsContext2DTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 28 Dec 17 2017 libvtkViewsCore-6.3.so.6.3 -> libvtkViewsCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 172K Dec 17 2017 libvtkViewsCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkViewsCorePython27D-6.3.so.6.3 -> libvtkViewsCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 181K Dec 17 2017 libvtkViewsCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkViewsCoreTCL-6.3.so.6.3 -> libvtkViewsCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 119K Dec 17 2017 libvtkViewsCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkViewsGeovis-6.3.so.6.3 -> libvtkViewsGeovis-6.3.so.6.3.0 -rw-r--r-- 1 root root 71K Dec 17 2017 libvtkViewsGeovis-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkViewsGeovisPython27D-6.3.so.6.3 -> libvtkViewsGeovisPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 32K Dec 17 2017 libvtkViewsGeovisPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkViewsGeovisTCL-6.3.so.6.3 -> libvtkViewsGeovisTCL-6.3.so.6.3.0
-rw-r--r-- 1 root root 27K Dec 17 2017 libvtkViewsGeovisTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkViewsInfovis-6.3.so.6.3 -> libvtkViewsInfovis-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.1M Dec 17 2017 libvtkViewsInfovis-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkViewsInfovisPython27D-6.3.so.6.3 -> libvtkViewsInfovisPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 927K Dec 17 2017 libvtkViewsInfovisPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkViewsInfovisTCL-6.3.so.6.3 -> libvtkViewsInfovisTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 565K Dec 17 2017 libvtkViewsInfovisTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkWrappingJava-6.3.so.6.3 -> libvtkWrappingJava-6.3.so.6.3.0 -rw-r--r-- 1 root root 19K Dec 17 2017 libvtkWrappingJava-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkWrappingPython27Core-6.3.so.6.3 -> libvtkWrappingPython27Core-6.3.so.6.3.0 -rw-r--r-- 1 root root 180K Dec 17 2017 libvtkWrappingPython27Core-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkalglib-6.3.so.6.3 -> libvtkalglib-6.3.so.6.3.0 -rw-r--r-- 1 root root 139K Dec 17 2017 libvtkalglib-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkexoIIc-6.3.so.6.3 -> libvtkexoIIc-6.3.so.6.3.0 -rw-r--r-- 1 root root 288K Dec 17 2017 libvtkexoIIc-6.3.so.6.3.0 lrwxrwxrwx 1 root root 23 Dec 17 2017 libvtkftgl-6.3.so.6.3 -> libvtkftgl-6.3.so.6.3.0 -rw-r--r-- 1 root root 43K Dec 17 2017 libvtkftgl-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkmetaio-6.3.so.6.3 -> libvtkmetaio-6.3.so.6.3.0 -rw-r--r-- 1 root root 591K Dec 17 2017 libvtkmetaio-6.3.so.6.3.0 lrwxrwxrwx 1 root root 22 Dec 17 2017 libvtksys-6.3.so.6.3 -> libvtksys-6.3.so.6.3.0 -rw-r--r-- 1 root root 278K Dec 17 2017 libvtksys-6.3.so.6.3.0 lrwxrwxrwx 1 root root 26 Dec 17 2017 libvtkverdict-6.3.so.6.3 -> libvtkverdict-6.3.so.6.3.0 -rw-r--r-- 1 root root 195K Dec 17 2017 libvtkverdict-6.3.so.6.3.0 lrwxrwxrwx 1 root root 24 Dec 17 2017 libvtkxdmf2-6.3.so.6.3 -> libvtkxdmf2-6.3.so.6.3.0 -rw-r--r-- 1 root root 472K Dec 17 2017 libvtkxdmf2-6.3.so.6.3.0 lrwxrwxrwx 1 root root 19 Jan 5 2021 libwavpack.so.1 -> libwavpack.so.1.2.0 -rw-r--r-- 1 root root 167K Jan 5 2021 libwavpack.so.1.2.0 lrwxrwxrwx 1 root root 26 Feb 1 2020 libwayland-client.so.0 -> libwayland-client.so.0.3.0 -rw-r--r-- 1 root root 60K Feb 1 2020 libwayland-client.so.0.3.0 lrwxrwxrwx 1 root root 26 Feb 1 2020 libwayland-cursor.so.0 -> libwayland-cursor.so.0.0.0 -rw-r--r-- 1 root root 31K Feb 1 2020 libwayland-cursor.so.0.0.0 lrwxrwxrwx 1 root root 23 Feb 1 2020 libwayland-egl.so.1 -> libwayland-egl.so.1.0.0 -rw-r--r-- 1 root root 5.9K Feb 1 2020 libwayland-egl.so.1.0.0 lrwxrwxrwx 1 root root 16 May 20 2021 libwebp.so.6 -> libwebp.so.6.0.2 -rw-r--r-- 1 root root 411K May 20 2021 libwebp.so.6.0.2 lrwxrwxrwx 1 root root 19 May 20 2021 libwebpmux.so.3 -> libwebpmux.so.3.0.1 -rw-r--r-- 1 root root 39K May 20 2021 libwebpmux.so.3.0.1 lrwxrwxrwx 1 root root 16 Dec 15 2017 libwind.so.0 -> libwind.so.0.0.0 -rw-r--r-- 1 root root 162K Dec 15 2017 libwind.so.0.0.0 -rw-r--r-- 1 root root 1.2M Jan 19 2018 libx264.so.152 -rw-r--r-- 1 root root 11M Dec 31 2017 libx265.so.146 lrwxrwxrwx 1 root root 20 Jun 21 2018 libxcb-dri2.so.0 -> libxcb-dri2.so.0.0.0 -rw-r--r-- 1 root root 19K Jun 21 2018 libxcb-dri2.so.0.0.0 lrwxrwxrwx 1 root root 20 Jun 21 2018 libxcb-dri3.so.0 -> libxcb-dri3.so.0.0.0 -rw-r--r-- 1 root root 15K Jun 21 2018 libxcb-dri3.so.0.0.0 lrwxrwxrwx 1 root root 19 Jun 21 2018 libxcb-glx.so.0 -> libxcb-glx.so.0.0.0 -rw-r--r-- 1 
root root 107K Jun 21 2018 libxcb-glx.so.0.0.0 lrwxrwxrwx 1 root root 23 Jun 21 2018 libxcb-present.so.0 -> libxcb-present.so.0.0.0 -rw-r--r-- 1 root root 11K Jun 21 2018 libxcb-present.so.0.0.0 lrwxrwxrwx 1 root root 22 Jun 21 2018 libxcb-render.so.0 -> libxcb-render.so.0.0.0 -rw-r--r-- 1 root root 51K Jun 21 2018 libxcb-render.so.0.0.0 lrwxrwxrwx 1 root root 19 Jun 21 2018 libxcb-shm.so.0 -> libxcb-shm.so.0.0.0 -rw-r--r-- 1 root root 11K Jun 21 2018 libxcb-shm.so.0.0.0 lrwxrwxrwx 1 root root 20 Jun 21 2018 libxcb-sync.so.1 -> libxcb-sync.so.1.0.0 -rw-r--r-- 1 root root 27K Jun 21 2018 libxcb-sync.so.1.0.0 lrwxrwxrwx 1 root root 15 Jun 21 2018 libxcb.so.1 -> libxcb.so.1.1.0 -rw-r--r-- 1 root root 159K Jun 21 2018 libxcb.so.1.1.0 lrwxrwxrwx 1 root root 16 Mar 24 2018 libxdot.so -> libxdot.so.4.0.0 lrwxrwxrwx 1 root root 16 Mar 24 2018 libxdot.so.4 -> libxdot.so.4.0.0 -rw-r--r-- 1 root root 23K Mar 24 2018 libxdot.so.4.0.0 -rw-r--r-- 1 root root 3.5M Nov 10 2017 libxerces-c-3.2.so lrwxrwxrwx 1 root root 21 Mar 5 2019 libxkbcommon.so.0 -> libxkbcommon.so.0.0.0 -rw-r--r-- 1 root root 251K Mar 5 2019 libxkbcommon.so.0.0.0 -rw-r--r-- 1 root root 2.9M Aug 1 14:25 libxml2.a lrwxrwxrwx 1 root root 16 Aug 1 14:25 libxml2.so -> libxml2.so.2.9.4 lrwxrwxrwx 1 root root 16 Aug 1 14:25 libxml2.so.2 -> libxml2.so.2.9.4 -rw-r--r-- 1 root root 1.8M Aug 1 14:25 libxml2.so.2.9.4 lrwxrwxrwx 1 root root 21 Mar 18 2018 libxshmfence.so.1 -> libxshmfence.so.1.0.0 -rw-r--r-- 1 root root 6.1K Mar 18 2018 libxshmfence.so.1.0.0 lrwxrwxrwx 1 root root 18 Jan 28 2018 libxvidcore.so.4 -> libxvidcore.so.4.3 -rw-r--r-- 1 root root 672K Jan 28 2018 libxvidcore.so.4.3 -rw-r--r-- 1 root root 162K Mar 26 2022 libz.a lrwxrwxrwx 1 root root 36 Mar 26 2022 libz.so -> /lib/x86_64-linux-gnu/libz.so.1.2.11 lrwxrwxrwx 1 root root 16 Mar 3 2021 libzstd.so.1 -> libzstd.so.1.3.3 -rw-r--r-- 1 root root 490K Mar 3 2021 libzstd.so.1.3.3 lrwxrwxrwx 1 root root 23 Nov 17 2016 libzvbi-chains.so.0 -> libzvbi-chains.so.0.0.0 -rw-r--r-- 1 root root 58K Nov 17 2016 libzvbi-chains.so.0.0.0 lrwxrwxrwx 1 root root 17 Nov 17 2016 libzvbi.so.0 -> libzvbi.so.0.13.2 -rw-r--r-- 1 root root 556K Nov 17 2016 libzvbi.so.0.13.2 drwxr-xr-x 2 root root 4.0K Aug 16 08:41 nss drwxr-xr-x 2 root root 4.0K Aug 16 08:41 odbc drwxr-xr-x 2 root root 4.0K Aug 16 08:41 openblas drwxr-xr-x 3 root root 4.0K Aug 16 08:41 openmpi drwxr-xr-x 1 root root 4.0K Aug 16 08:40 perl drwxr-xr-x 16 root root 4.0K Aug 1 13:21 perl-base drwxr-xr-x 2 root root 4.0K Aug 16 08:41 pkgconfig -rw-r--r-- 1 root root 1.8K May 3 10:19 rcrt1.o drwxr-xr-x 2 root root 4.0K Aug 16 08:41 rsocket drwxr-xr-x 2 root root 4.0K Aug 16 08:41 sasl2 drwxr-xr-x 2 root root 4.0K Aug 16 08:41 vdpau drwxr-xr-x 2 root root 4.0K Aug 16 08:41 x264-10bit -rw-r--r-- 1 root root 204 Aug 1 14:25 xml2Conf.sh [Pipeline] echo Packing build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so into dgl-cpu-linux [Pipeline] stash Stashed 4 file(s) Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration... 
[WS-CLEANUP] done [Pipeline] } $ docker stop --time=1 856b09e8bd127c677672426c8af0a6bfefd91dd73a4c28827d4d41ef0f5d0fcc $ docker rm -f 856b09e8bd127c677672426c8af0a6bfefd91dd73a4c28827d4d41ef0f5d0fcc [Pipeline] // withDockerContainer [Pipeline] } [Pipeline] // withEnv [Pipeline] } [Pipeline] // node [Pipeline] } [Pipeline] // stage [Pipeline] } Archiving objects > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/obj/collectives/device/colldevice.a Linking libnccl.so.2.11.4 > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/lib/libnccl.so.2.11.4 Archiving libnccl_static.a > /root/jenkins/workspace/dgl_PR-4648@2/build/nccl/lib/libnccl_static.a /root/jenkins/workspace/dgl_PR-4648@2/third_party/nccl/src [ 49%] No install step for 'nccl_external' [ 49%] Completed 'nccl_external' [ 49%] Built target nccl_external [ 49%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/runtime/cuda/dgl_generated_nccl_api.cu.o [ 49%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_array_cumsum.cu.o [ 49%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_array_index_select.cu.o [ 49%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_array_nonzero.cu.o [ 49%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_array_op_impl.cu.o [ 50%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_array_scatter.cu.o [ 50%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_csr_get_data.cu.o [ 50%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_array_sort.cu.o [ 50%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_csr_mm.cu.o [ 50%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_coo2csr.cu.o [ 50%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_coo_sort.cu.o [ 51%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_csr2coo.cu.o [ 51%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_csr_sort.cu.o [ 53%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_csr_sum.cu.o [ 53%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_cuda_filter.cu.o [ 53%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_disjoint_union.cu.o [ 53%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_gather_mm.cu.o [ 53%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_negative_sampling.cu.o [ 53%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_rowwise_sampling_prob.cu.o [ 54%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_rowwise_sampling.cu.o [ 54%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_sddmm_hetero_coo.cu.o [ 54%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_sddmm.cu.o [ 54%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_spmat_op_impl_csr.cu.o [ 54%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_segment_reduce.cu.o [ 54%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_spmat_op_impl_coo.cu.o [ 55%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_sddmm_hetero_csr.cu.o [ 56%] Building NVCC (Device) object 
CMakeFiles/dgl.dir/src/geometry/cuda/dgl_generated_geometry_op_impl.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_spmm.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_spmm_hetero.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_utils.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/uvm/dgl_generated_array_index_select_uvm.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/dgl_generated_frequency_hashmap.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/geometry/cuda/dgl_generated_edge_coarsening_impl.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/dgl_generated_get_node_types_gpu.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/dgl_generated_randomwalk_gpu.cu.o [ 59%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/transform/cuda/dgl_generated_cuda_compact_graph.cu.o [ 59%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/transform/cuda/dgl_generated_cuda_to_block.cu.o [ 59%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/partition/cuda/dgl_generated_partition_op.cu.o [ 59%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/transform/cuda/dgl_generated_knn.cu.o [ 60%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/runtime/cuda/dgl_generated_cuda_hashtable.cu.o /root/jenkins/workspace/dgl_PR-4648@2/src/array/cuda/gather_mm.cu(181): warning: variable "out_reg" was declared but never referenced detected during: instantiation of "void dgl::aten::cuda::GatherMMScatterKernel2(const DType *, const DType *, DType *, const Idx *, const Idx *, const Idx *, int64_t, int64_t, int64_t) [with Idx=int32_t, DType=float]" (391): here instantiation of "void dgl::aten::GatherMMScatter(dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray) [with XPU=2, IdType=int32_t, DType=float]" (444): here /root/jenkins/workspace/dgl_PR-4648@2/src/array/cuda/gather_mm.cu(181): warning: variable "out_reg" was declared but never referenced detected during: instantiation of "void dgl::aten::cuda::GatherMMScatterKernel2(const DType *, const DType *, DType *, const Idx *, const Idx *, const Idx *, int64_t, int64_t, int64_t) [with Idx=int64_t, DType=float]" (391): here instantiation of "void dgl::aten::GatherMMScatter(dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray) [with XPU=2, IdType=int64_t, DType=float]" (447): here /root/jenkins/workspace/dgl_PR-4648@2/src/array/cuda/gather_mm.cu(181): warning: variable "out_reg" was declared but never referenced detected during: instantiation of "void dgl::aten::cuda::GatherMMScatterKernel2(const DType *, const DType *, DType *, const Idx *, const Idx *, const Idx *, int64_t, int64_t, int64_t) [with Idx=int32_t, DType=double]" (391): here instantiation of "void dgl::aten::GatherMMScatter(dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray) [with XPU=2, IdType=int32_t, DType=double]" (450): here /root/jenkins/workspace/dgl_PR-4648@2/src/array/cuda/gather_mm.cu(181): warning: variable "out_reg" was declared but never referenced detected during: instantiation of "void 
dgl::aten::cuda::GatherMMScatterKernel2(const DType *, const DType *, DType *, const Idx *, const Idx *, const Idx *, int64_t, int64_t, int64_t) [with Idx=int64_t, DType=double]" (391): here instantiation of "void dgl::aten::GatherMMScatter(dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray) [with XPU=2, IdType=int64_t, DType=double]" (453): here nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). Archiving objects > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/obj/collectives/device/colldevice.a Linking libnccl.so.2.11.4 > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/lib/libnccl.so.2.11.4 Archiving libnccl_static.a > /root/jenkins/workspace/dgl_PR-4648@3/build/nccl/lib/libnccl_static.a /root/jenkins/workspace/dgl_PR-4648@3/third_party/nccl/src [ 49%] No install step for 'nccl_external' [ 49%] Completed 'nccl_external' [ 49%] Built target nccl_external
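The warning block above is a single diagnostic fanned out by template instantiation: gather_mm.cu(181) declares a local "out_reg" that one code path never reads, and nvcc's front end re-emits the message for every (Idx, DType) combination that GatherMMScatter dispatches over. A minimal compile-only sketch of the pattern follows; it is an assumed shape for illustration, not DGL's actual kernel.

#include <cuda_runtime.h>
#include <cstdint>

// Sketch of how one "declared but never referenced" warning becomes many:
// the diagnostic is issued once per instantiation of the template.
template <typename Idx, typename DType>
__global__ void GatherScatterSketch(const DType* in, DType* out,
                                    const Idx* idx, int64_t n) {
  DType out_reg;  // never referenced below -> one warning at this line per
                  // (Idx, DType) pair; deleting the declaration (or actually
                  // accumulating into it) silences every copy at once
  int64_t i = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in[idx[i]];
}

// Four explicit instantiations, mirroring the int32_t/int64_t crossed with
// float/double dispatch seen in the log, reproduce the warning four times.
template __global__ void GatherScatterSketch<int32_t, float>(const float*, float*, const int32_t*, int64_t);
template __global__ void GatherScatterSketch<int64_t, float>(const float*, float*, const int64_t*, int64_t);
template __global__ void GatherScatterSketch<int32_t, double>(const double*, double*, const int32_t*, int64_t);
template __global__ void GatherScatterSketch<int64_t, double>(const double*, double*, const int64_t*, int64_t);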
CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_spmm.cu.o [ 54%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_spmm_hetero.cu.o [ 54%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_disjoint_union.cu.o [ 54%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_utils.cu.o [ 54%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_negative_sampling.cu.o [ 55%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/uvm/dgl_generated_array_index_select_uvm.cu.o [ 55%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_csr_sum.cu.o [ 56%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_rowwise_sampling.cu.o [ 56%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/dgl_generated_frequency_hashmap.cu.o [ 56%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/geometry/cuda/dgl_generated_edge_coarsening_impl.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/geometry/cuda/dgl_generated_geometry_op_impl.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/dgl_generated_get_node_types_gpu.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/dgl_generated_randomwalk_gpu.cu.o [ 57%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/array/cuda/dgl_generated_spmat_op_impl_coo.cu.o [ 59%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/transform/cuda/dgl_generated_cuda_compact_graph.cu.o [ 59%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/transform/cuda/dgl_generated_knn.cu.o [ 59%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/graph/transform/cuda/dgl_generated_cuda_to_block.cu.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
[ 59%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/partition/cuda/dgl_generated_partition_op.cu.o [ 60%] Building NVCC (Device) object CMakeFiles/dgl.dir/src/runtime/cuda/dgl_generated_cuda_hashtable.cu.o nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). 
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(56): warning #177-D: parameter "handle" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(56): warning #177-D: parameter "transa" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(57): warning #177-D: parameter "transb" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(57): warning #177-D: parameter "m" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(57): warning #177-D: parameter "n" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(58): warning #177-D: parameter "alpha" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(58): warning #177-D: parameter "A" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(58): warning #177-D: parameter "lda" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(59): warning #177-D: parameter "beta" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(59): warning #177-D: parameter "B" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(59): warning #177-D: parameter "ldb" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(60): warning #177-D: parameter "C" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(60): warning #177-D: parameter "ldc" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(69): warning #177-D: parameter "handle" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(69): warning #177-D: parameter "transa" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(70): warning #177-D: parameter "transb" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(70): warning #177-D: parameter "m" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(70): warning #177-D: parameter "n" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(71): warning #177-D: parameter "alpha" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(71): warning #177-D: parameter "A" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(71): warning #177-D: parameter "lda" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(72): warning #177-D: parameter "beta" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(72): warning #177-D: parameter "B" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(72): warning #177-D: parameter "ldb" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(73): warning #177-D: parameter "C" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(73): warning #177-D: parameter "ldc" was declared but never referenced
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(203): warning #177-D: variable "arr_len" was declared but never referenced
          detected during instantiation of "void dgl::aten::SpMMCsr(const std::__cxx11::string &, const std::__cxx11::string &, const dgl::BcastOff &, const dgl::aten::CSRMatrix &, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, std::vector>) [with XPU=2, IdType=int32_t, DType=__half]"
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/spmm.cu(114): here
/root/jenkins/workspace/dgl_PR-4648@3/src/graph/transform/cuda/cuda_to_block.cu(123): warning #177-D: variable "num_ntypes" was declared but never referenced
          detected during instantiation of "std::tuple>> dgl::transform::::ToBlockGPU(dgl::HeteroGraphPtr, const std::vector> &, __nv_bool, std::vector> *) [with IdType=int32_t]"
(392): here
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/./spmm.cuh(203): warning #177-D: variable "arr_len" was declared but never referenced
          detected during instantiation of "void dgl::aten::SpMMCsrHetero(const std::__cxx11::string &, const std::__cxx11::string &, const dgl::BcastOff &, const std::vector> &, const std::vector> &, const std::vector> &, std::vector> *, std::vector>, std::allocator>>> *, const std::vector> &, const std::vector> &) [with XPU=2, IdType=int32_t, DType=__half]"
/root/jenkins/workspace/dgl_PR-4648@3/src/array/cuda/spmm_hetero.cu(182): here
/root/jenkins/workspace/dgl_PR-4648@3/include/dgl/random.h(28): warning #177-D: variable "guard" was declared but never referenced
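
The #177-D diagnostics above flag parameters and variables that are declared but never used in the enclosing body; the block is emitted once per compilation unit that includes spmm.cuh. A minimal sketch of how the parameter flavor arises and the usual zero-cost way to silence it; all names and types here are hypothetical, not DGL's actual spmm.cuh code:

    // Sketch: a fallback overload that keeps a cuSPARSE-style signature but
    // ignores its arguments. Illustrative only.
    #include <cstdio>

    struct Handle {};

    // Named-but-unused parameters: nvcc's front end emits #177-D for each one.
    void SpMMFallback(Handle handle, int m, int n, const float* A, int lda,
                      float* C, int ldc) {
      std::printf("fallback: this dtype is not supported on this path\n");
    }

    // Same signature with the parameter names omitted: the API is unchanged
    // and there is nothing left to warn about.
    void SpMMFallbackQuiet(Handle, int, int, const float*, int, float*, int) {
      std::printf("fallback: this dtype is not supported on this path\n");
    }

    int main() {
      Handle h;
      SpMMFallback(h, 2, 2, nullptr, 2, nullptr, 2);
      SpMMFallbackQuiet(h, 2, 2, nullptr, 2, nullptr, 2);
      return 0;
    }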
/root/jenkins/workspace/dgl_PR-4648@3/src/graph/sampling/randomwalks/frequency_hashmap.cu(105): warning #177-D: parameter "dst_data" was declared but never referenced
          detected during instantiation of "std::tuple dgl::sampling::impl::FrequencyHashmap::Topk(const IdxType *, const IdxType *, DGLDataType, int64_t, int64_t, int64_t) [with IdxType=int64_t]"
(435): here
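
The recurring random.h(28) diagnostic above is the variable flavor of #177-D. A plausible reading, offered here only as an assumption, is that `guard` is an RAII object (for example a lock guard) that is never read after construction; EDG-based front ends such as nvcc's report it as unreferenced even though its constructor and destructor do real work. A hedged sketch of that pattern, not DGL's actual random.h code:

    #include <iostream>
    #include <mutex>

    std::mutex rng_mutex;

    int DrawNumber() {
      // The guard is held only for its constructor/destructor side effects.
      // nvcc's front end reports such objects as "declared but never
      // referenced" unless they are explicitly marked as used:
      std::lock_guard<std::mutex> guard(rng_mutex);
      (void)guard;  // explicit reference; silences the diagnostic portably
      return 4;     // deterministic stand-in for the RNG call
    }

    int main() {
      std::cout << DrawNumber() << "\n";
      return 0;
    }

On C++17 and later, `[[maybe_unused]]` on the declaration expresses the same intent without the cast.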
Scanning dependencies of target dgl
[ 60%] Building CXX object CMakeFiles/dgl.dir/src/array/array.cc.o
[ 60%] Building CXX object CMakeFiles/dgl.dir/src/array/array_arith.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_cumsum.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_sort.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_index_select.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_nonzero.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_pack.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_op_impl.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_scatter.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_remove.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_sort.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_repeat.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_coalesce.cc.o
[ 63%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_union.cc.o
[ 63%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_remove.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_get_data.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_linegraph.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_sort.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_mm.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/negative_sampling.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_sum.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/gather_mm.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_to_simple.cc.o
[ 67%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/spmat_op_impl_coo.cc.o
[ 67%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/rowwise_topk.cc.o
[ 67%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/disjoint_union.cc.o
[ 68%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/rowwise_sampling.cc.o
[ 68%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/segment_reduce.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/array/kernel.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/array/filter.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/spmat_op_impl_csr.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/spmm.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/sddmm.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/random/cpu/choice.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/geometry/geometry.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/partition/ndarray_partition.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/array/libra_partition.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/array/uvm_array.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/random/random.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/runtime/module_util.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/geometry/cpu/geometry_op_impl.cc.o
[ 72%] Building CXX object CMakeFiles/dgl.dir/src/runtime/object.cc.o
[ 72%] Building CXX object CMakeFiles/dgl.dir/src/runtime/module.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/bcast.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/array/union_partition.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/runtime/c_object_api.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/traversal.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/graph/gk_ops.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/runtime/dlpack_convert.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/runtime/config.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/runtime/dso_module.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/runtime/ndarray.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/runtime/cpu_device_api.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/runtime/thread_pool.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/c_api_common.cc.o
[ 74%] Building CXX object CMakeFiles/dgl.dir/src/runtime/c_runtime_api.cc.o
[ 74%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph.cc.o
[ 75%] Building CXX object CMakeFiles/dgl.dir/src/runtime/file_util.cc.o
[ 75%] Building CXX object CMakeFiles/dgl.dir/src/runtime/semaphore_wrapper.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/runtime/shared_mem.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/runtime/utils.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/runtime/threading_backend.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/runtime/registry.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/api/api_test.cc.o
[ 79%] Building CXX object CMakeFiles/dgl.dir/src/runtime/resource_manager.cc.o
[ 79%] Building CXX object CMakeFiles/dgl.dir/src/graph/creators.cc.o
[ 79%] Building CXX object CMakeFiles/dgl.dir/src/runtime/system_lib_module.cc.o
[ 79%] Building CXX object CMakeFiles/dgl.dir/src/graph/network.cc.o
[ 79%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph_apis.cc.o
[ 79%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/get_node_types_cpu.cc.o
[ 79%] Building CXX object CMakeFiles/dgl.dir/src/graph/heterograph.cc.o
[ 80%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampler.cc.o
[ 80%] Building CXX object CMakeFiles/dgl.dir/src/runtime/tensordispatch.cc.o
[ 81%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/node2vec.cc.o
[ 81%] Building CXX object CMakeFiles/dgl.dir/src/graph/heterograph_capi.cc.o
[ 81%] Building CXX object CMakeFiles/dgl.dir/src/runtime/workspace_pool.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/nodeflow.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/metis_partition.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph_traversal.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/api/api_container.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph_op.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/graph/immutable_graph.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/negative/global_uniform.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/graph/pickle.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/node2vec_cpu.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/neighbor/neighbor.cc.o
[ 85%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/cpu/knn.cc.o
[ 85%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/heterograph_serialize.cc.o
[ 85%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/randomwalk_with_restart_cpu.cc.o
[ 85%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/randomwalk_cpu.cc.o
[ 85%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/tensor_serialize.cc.o
[ 86%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/zerocopy_serializer.cc.o
[ 86%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/line_graph.cc.o
[ 86%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/compact.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/randomwalks.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/dglgraph_serialize.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/graph_serialize.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/knn.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/subgraph.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/remove_edges.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/shared_mem_manager.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/to_simple.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/metis_partition_hetero.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/union_partition.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/socket_pool.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/msg_queue.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/partition_hetero.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/socket_communicator.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/to_bipartite.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/graph/unit_graph.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/array/cuda/csr_transpose.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/scheduler/scheduler_apis.cc.o
[ 90%] Building CXX object CMakeFiles/dgl.dir/src/graph/traversal.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/common.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/rpc/rpc.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/scheduler/scheduler.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/runtime/cuda/cuda_device_api.cc.o
[ 92%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/tcp_socket.cc.o
[ 92%] Building CXX object CMakeFiles/dgl.dir/src/rpc/tensorpipe/tp_communicator.cc.o
In file included from /root/jenkins/workspace/dgl_PR-4648@2/src/geometry/../c_api_common.h:10:0,
                 from /root/jenkins/workspace/dgl_PR-4648@2/src/geometry/geometry.cc:9:
/root/jenkins/workspace/dgl_PR-4648@2/include/dgl/runtime/packed_func.h:502:21: warning: inline function 'TObjectRef dgl::runtime::DGLArgValue::AsObjectRef() const [with TObjectRef = dgl::HeteroGraphRef]' used but never defined
 inline TObjectRef AsObjectRef() const;
                    ^~~~~~~~~~~
/root/jenkins/workspace/dgl_PR-4648@2/src/partition/ndarray_partition.cc: In member function 'virtual int64_t dgl::partition::RangePartition::PartSize(int) const':
/root/jenkins/workspace/dgl_PR-4648@2/src/partition/ndarray_partition.cc:202:3: warning: control reaches end of non-void function [-Wreturn-type]
 }
 ^
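
gcc's "control reaches end of non-void function" [-Wreturn-type] fires when the compiler cannot prove that every path returns a value, most often after a switch over an enum with no trailing return. A sketch of the pattern and a conventional fix; the enum and function below are hypothetical, not the actual RangePartition::PartSize body:

    #include <cstdint>
    #include <cstdlib>
    #include <iostream>

    enum class Mode { kRemainder, kRange };

    int64_t PartSizeSketch(Mode mode, int64_t total, int64_t parts) {
      switch (mode) {
        case Mode::kRemainder:
          return total / parts + (total % parts != 0 ? 1 : 0);
        case Mode::kRange:
          return total / parts;
      }
      // Without a statement here, gcc cannot prove the switch is exhaustive
      // (the enum could hold an out-of-range value) and emits -Wreturn-type.
      std::abort();
    }

    int main() {
      std::cout << PartSizeSketch(Mode::kRange, 10, 3) << "\n";
      return 0;
    }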
In file included from /root/jenkins/workspace/dgl_PR-4648@2/third_party/dmlc-core/include/dmlc/logging.h:132:0,
                 from /root/jenkins/workspace/dgl_PR-4648@2/include/dgl/./runtime/object.h:9,
                 from /root/jenkins/workspace/dgl_PR-4648@2/include/dgl/graph_interface.h:15,
                 from /root/jenkins/workspace/dgl_PR-4648@2/include/dgl/sampler.h:13,
                 from /root/jenkins/workspace/dgl_PR-4648@2/src/graph/sampler.cc:6:
/root/jenkins/workspace/dgl_PR-4648@2/src/graph/sampler.cc: In member function 'dgl::NegSubgraph dgl::{anonymous}::EdgeSamplerObject::genNegEdgeSubgraph(const dgl::Subgraph&, const string&, int64_t, bool, bool)':
/root/jenkins/workspace/dgl_PR-4648@2/src/graph/sampler.cc:1189:48: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   assert(prev_neg_offset + neg_sample_size == neg_vids.size());
          ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~
/root/jenkins/workspace/dgl_PR-4648@2/src/graph/sampler.cc:1193:48: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   assert(prev_neg_offset + neg_sample_size == neg_vids.size());
          ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~
/root/jenkins/workspace/dgl_PR-4648@2/src/array/libra_partition.cc: In function 'dgl::runtime::List dgl::aten::Libra2dglBuildDict(dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, int32_t, int32_t, int64_t, const string&)':
/root/jenkins/workspace/dgl_PR-4648@2/src/array/libra_partition.cc:396:11: warning: ignoring return value of 'int fscanf(FILE*, const char*, ...)', declared with attribute warn_unused_result [-Wunused-result]
   fscanf(fp, "%ld,%ld,%f\n", &u, &v, &w);  // reading an edge - the src and dst global node IDs
   ~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/root/jenkins/workspace/dgl_PR-4648@2/src/array/libra_partition.cc: In function 'int32_t dgl::aten::Ver2partition(IdType, int64_t*, int32_t) [with IdType = long int]':
/root/jenkins/workspace/dgl_PR-4648@2/src/array/libra_partition.cc:43:1: warning: control reaches end of non-void function [-Wreturn-type]
 }
 ^
[ 93%] Linking CXX shared library libdgl.so
[ 93%] Built target dgl
Scanning dependencies of target rpc_server
Scanning dependencies of target rpc_client
[ 95%] Building CXX object CMakeFiles/rpc_server.dir/tests/dist/cpp/rpc_server.cc.o
[ 95%] Building CXX object CMakeFiles/rpc_client.dir/tests/dist/cpp/rpc_client.cc.o
Scanning dependencies of target runUnitTests
[ 95%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/graph_index_test.cc.o
[ 95%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/message_queue_test.cc.o
[ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/socket_communicator_test.cc.o
[ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/string_test.cc.o
[ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_aten.cc.o
[ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_sampler.cc.o
[ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_csrmm.cc.o
[ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_serialize.cc.o
[ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_partition.cc.o
[ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_rowwise.cc.o
[ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_spmm.cc.o
[ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_spmat_csr.cc.o
[ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_smart_ptr_serialize.cc.o
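
The [-Wunused-result] warning above exists because glibc marks fscanf with warn_unused_result; checking the count of converted fields both silences it and catches malformed lines. A hedged sketch around the same "%ld,%ld,%f" edge-list format; the surrounding code is illustrative, not DGL's:

    #include <cstdio>

    // fscanf returns the number of fields successfully converted, so
    // comparing against 3 consumes the warn_unused_result value and
    // doubles as input validation.
    bool ReadEdge(std::FILE* fp, long* u, long* v, float* w) {
      return std::fscanf(fp, "%ld,%ld,%f\n", u, v, w) == 3;
    }

    int main() {
      std::FILE* fp = std::tmpfile();  // scratch file for the demo
      if (!fp) return 1;
      std::fputs("0,1,0.5\n", fp);
      std::rewind(fp);
      long u, v;
      float w;
      if (ReadEdge(fp, &u, &v, &w))
        std::printf("edge %ld -> %ld (weight %.2f)\n", u, v, w);
      std::fclose(fp);
      return 0;
    }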
[ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_smart_ptr_serialize.cc.o
[ 98%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_spmat_coo.cc.o
[ 98%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_unit_graph.cc.o
[100%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_zerocopy_serialize.cc.o
/root/jenkins/workspace/dgl_PR-4648@2/tests/dist/cpp/rpc_client.cc: In member function 'void RPCClient::StartClient()':
/root/jenkins/workspace/dgl_PR-4648@2/tests/dist/cpp/rpc_client.cc:50:15: warning: unused variable 'num_machines' [-Wunused-variable]
   const int num_machines = ips_.size();
             ^~~~~~~~~~~~
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc: In instantiation of 'void _TestRemainder_MapToX() [with DGLDeviceType XPU = (DGLDeviceType)2; IdType = int]':
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:86:44:   required from here
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:66:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < global->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:74:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < act_local->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc: In instantiation of 'void _TestRemainder_MapToX() [with DGLDeviceType XPU = (DGLDeviceType)2; IdType = long int]':
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:87:44:   required from here
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:66:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < global->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:74:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < act_local->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc: In instantiation of 'void _TestRange_MapToX() [with DGLDeviceType XPU = (DGLDeviceType)2; IdType = int]':
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:195:40:   required from here
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:176:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < global->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:184:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < act_local->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc: In instantiation of 'void _TestRange_MapToX() [with DGLDeviceType XPU = (DGLDeviceType)2; IdType = long int]':
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:196:40:   required from here
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:176:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < global->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@2/tests/cpp/test_partition.cc:184:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < act_local->shape[0]; ++i) {
[100%] Linking CXX executable rpc_server
[100%] Linking CXX executable rpc_client
[100%] Built target rpc_server
[100%] Built target rpc_client
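All eight test_partition.cc warnings are one pattern repeated across template instantiations: a size_t loop index compared against a tensor shape entry, which DGL stores as signed int64_t. The unused num_machines in rpc_client.cc is likewise a one-line fix. A hedged sketch under those assumptions; FakeNDArray is a stand-in, not the real DGL type:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the tensor used in test_partition.cc: shape entries are
    // signed int64_t, which is what collides with an unsigned size_t index.
    struct FakeNDArray {
      int64_t shape[1];
    };

    int main() {
      FakeNDArray global{{8}};

      // -Wsign-compare fix: give the induction variable the signed type of
      // the bound instead of size_t.
      for (int64_t i = 0; i < global.shape[0]; ++i) {
        std::printf("%lld ", static_cast<long long>(i));
      }
      std::printf("\n");

      // -Wunused-variable fix (rpc_client.cc): delete the variable, or mark
      // it [[maybe_unused]] (C++17) if it documents intent.
      [[maybe_unused]] const int num_machines = 4;
      return 0;
    }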
[100%] Linking CXX executable runUnitTests
[100%] Built target runUnitTests
~/jenkins/workspace/dgl_PR-4648@2 ~/jenkins/workspace/dgl_PR-4648@2/python ~/jenkins/workspace/dgl_PR-4648@2
WARNING: Skipping dgl as it is not installed.
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
[1/1] Cythonizing dgl/_ffi/_cython/core.pyx
running install
running bdist_egg
running egg_info
creating dgl.egg-info
writing dgl.egg-info/PKG-INFO
writing dependency_links to dgl.egg-info/dependency_links.txt
writing requirements to dgl.egg-info/requires.txt
writing top-level names to dgl.egg-info/top_level.txt
writing manifest file 'dgl.egg-info/SOURCES.txt'
reading manifest file 'dgl.egg-info/SOURCES.txt'
writing manifest file 'dgl.egg-info/SOURCES.txt'
installing library code to build/bdist.linux-x86_64/egg
running install_lib
running build_py
creating build
creating build/lib.linux-x86_64-3.7
creating build/lib.linux-x86_64-3.7/dgl
copying dgl/partition.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/core.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/subgraph.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/traversal.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/base.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/heterograph_index.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/sparse.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/global_config.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/_api_internal.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/heterograph.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/network.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/logging.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/graph_index.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/udf.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/ndarray.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/init.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/view.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/generators.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/convert.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/container.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/merge.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/readout.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/__init__.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/propagate.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/batch.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/frame.py -> build/lib.linux-x86_64-3.7/dgl
copying dgl/random.py -> build/lib.linux-x86_64-3.7/dgl
creating build/lib.linux-x86_64-3.7/dgl/mock_sparse
copying dgl/mock_sparse/sp_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse
copying dgl/mock_sparse/diag_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse
copying dgl/mock_sparse/reduction.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse
copying dgl/mock_sparse/__init__.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse
copying dgl/mock_sparse/elementwise_op_sp.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse
creating build/lib.linux-x86_64-3.7/dgl/geometry
copying dgl/geometry/edge_coarsening.py -> build/lib.linux-x86_64-3.7/dgl/geometry
copying dgl/geometry/fps.py -> build/lib.linux-x86_64-3.7/dgl/geometry
copying dgl/geometry/capi.py -> build/lib.linux-x86_64-3.7/dgl/geometry
copying dgl/geometry/__init__.py -> build/lib.linux-x86_64-3.7/dgl/geometry
creating build/lib.linux-x86_64-3.7/dgl/function
copying dgl/function/base.py ->
build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/message.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/reducer.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/__init__.py -> build/lib.linux-x86_64-3.7/dgl/function creating build/lib.linux-x86_64-3.7/dgl/nn copying dgl/nn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn creating build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/dis_kvstore.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/unified_tensor.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/graph_store.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib creating build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/checks.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/exception.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/pin_memory.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/internal.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/filter.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/data.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/__init__.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/shared_mem.py -> build/lib.linux-x86_64-3.7/dgl/utils creating build/lib.linux-x86_64-3.7/dgl/multiprocessing copying dgl/multiprocessing/pytorch.py -> build/lib.linux-x86_64-3.7/dgl/multiprocessing copying dgl/multiprocessing/__init__.py -> build/lib.linux-x86_64-3.7/dgl/multiprocessing creating build/lib.linux-x86_64-3.7/dgl/optim copying dgl/optim/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim creating build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/base.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/graphsaint.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/shadow.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/negative_sampler.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/__init__.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/cluster_gcn.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/neighbor_sampler.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/dist_dataloader.py -> build/lib.linux-x86_64-3.7/dgl/dataloading creating build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/neighbor.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/shadow.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/negative_sampler.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/cluster_gcn.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading creating build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/backend.py -> build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/set_default_backend.py -> build/lib.linux-x86_64-3.7/dgl/backend creating build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/edge_softmax.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/sddmm.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/gather_mm.py -> 
build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/segment.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/__init__.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/spmm.py -> build/lib.linux-x86_64-3.7/dgl/ops creating build/lib.linux-x86_64-3.7/dgl/cuda copying dgl/cuda/nccl.py -> build/lib.linux-x86_64-3.7/dgl/cuda copying dgl/cuda/__init__.py -> build/lib.linux-x86_64-3.7/dgl/cuda creating build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/pytorch_tensor.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/base.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/tensor.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/__init__.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/numpy.py -> build/lib.linux-x86_64-3.7/dgl/storages creating build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/neighbor.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/pinsage.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/negative.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/randomwalks.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/utils.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/node2vec_randomwalk.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/__init__.py -> build/lib.linux-x86_64-3.7/dgl/sampling creating build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm7b.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/graph_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/dgl_dataset.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/csv_dataset_base.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/knowledge_graph.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm9_edge.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gnn_benchmark.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm9.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/icews18.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gdelt.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/utils.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/sbm.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/csv_dataset.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/karate.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/citation_graph.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/rdf.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/bitcoinotc.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/ppi.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tensor_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/flickr.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/wikics.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/heterograph_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tu.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/__init__.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gindt.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tree.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/synthetic.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/adapter.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/fakenews.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/fraud.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/minigc.py -> 
build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/yelp.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/reddit.py -> build/lib.linux-x86_64-3.7/dgl/data creating build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_graph.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/graph_partition_book.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/partition.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/standalone_kvstore.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/graph_services.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/role.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/constants.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc_server.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/kvstore.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_context.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc_client.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/id_map.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/server_state.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/shared_mem_utils.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_tensor.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_dataloader.py -> build/lib.linux-x86_64-3.7/dgl/distributed creating build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/nodeflow.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/udf.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/view.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/kernel.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/graph.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/frame.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate creating build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/functional.py -> build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/__init__.py -> build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/module.py -> build/lib.linux-x86_64-3.7/dgl/transforms creating build/lib.linux-x86_64-3.7/dgl/distgnn copying dgl/distgnn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn creating build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/runtime_ctypes.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/function.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/libinfo.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/streams.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/base.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/ndarray.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/object_generic.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/object.py -> build/lib.linux-x86_64-3.7/dgl/_ffi creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying 
dgl/nn/pytorch/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/linear.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/factory.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow creating build/lib.linux-x86_64-3.7/dgl/nn/functional copying dgl/nn/functional/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/functional creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/transe.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/edgepred.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/transr.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/agnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/dgnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/grouprevres.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/twirlsconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/egatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gcn2conv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/dotgatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/cfconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gmmconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densesageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gineconv.py -> 
build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/nnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gatv2conv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/hgtconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/pnaconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gatedgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/atomicconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/tagconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densegraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/egnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain copying dgl/nn/pytorch/explain/gnnexplainer.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain copying dgl/nn/pytorch/explain/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/agnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gmmconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densesageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/nnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gatedgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/tagconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densegraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying 
dgl/nn/tensorflow/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv creating build/lib.linux-x86_64-3.7/dgl/contrib/sampling copying dgl/contrib/sampling/sampler.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling copying dgl/contrib/sampling/dis_sampler.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling copying dgl/contrib/sampling/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling creating build/lib.linux-x86_64-3.7/dgl/contrib/data copying dgl/contrib/data/knowledge_graph.py -> build/lib.linux-x86_64-3.7/dgl/contrib/data copying dgl/contrib/data/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib/data creating build/lib.linux-x86_64-3.7/dgl/optim/pytorch copying dgl/optim/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/pytorch copying dgl/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/optim/pytorch creating build/lib.linux-x86_64-3.7/dgl/optim/mxnet copying dgl/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/mxnet creating build/lib.linux-x86_64-3.7/dgl/optim/tensorflow copying dgl/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/tensorflow creating build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch copying dgl/_dataloading/pytorch/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch copying dgl/_dataloading/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch creating build/lib.linux-x86_64-3.7/dgl/backend/pytorch copying dgl/backend/pytorch/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch copying dgl/backend/pytorch/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch copying dgl/backend/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch creating build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet copying dgl/backend/mxnet/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet creating build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow copying dgl/backend/tensorflow/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow creating build/lib.linux-x86_64-3.7/dgl/distributed/nn copying dgl/distributed/nn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn creating build/lib.linux-x86_64-3.7/dgl/distributed/optim copying dgl/distributed/optim/__init__.py -> 
build/lib.linux-x86_64-3.7/dgl/distributed/optim creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch copying dgl/distributed/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch copying dgl/distributed/nn/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet copying dgl/distributed/nn/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow copying dgl/distributed/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/utils.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet copying dgl/distributed/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow copying dgl/distributed/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/spmv.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/degree_bucketing.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/scheduler.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/adapter.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/runtime.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/program.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/executor.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/var.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/registry.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir creating build/lib.linux-x86_64-3.7/dgl/distgnn/partition copying dgl/distgnn/partition/libra_partition.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/partition copying dgl/distgnn/partition/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/partition creating build/lib.linux-x86_64-3.7/dgl/distgnn/tools copying dgl/distgnn/tools/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/tools copying dgl/distgnn/tools/tools.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/tools creating build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/function.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/types.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/ndarray.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/object.py -> 
build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3
copying dgl/_ffi/_cy3/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2
copying dgl/_ffi/_cy2/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cython
copying dgl/_ffi/_cython/core.cpp -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cython
running build_ext
building 'dgl._ffi._cy3.core' extension
creating build/temp.linux-x86_64-3.7
creating build/temp.linux-x86_64-3.7/dgl
creating build/temp.linux-x86_64-3.7/dgl/_ffi
creating build/temp.linux-x86_64-3.7/dgl/_ffi/_cython
gcc -pthread -B /opt/conda/envs/pytorch-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/pytorch-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o
cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++
g++ -pthread -B /opt/conda/envs/pytorch-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/pytorch-ci/compiler_compat -L/opt/conda/envs/pytorch-ci/lib -Wl,-rpath=/opt/conda/envs/pytorch-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
creating build/bdist.linux-x86_64
creating build/bdist.linux-x86_64/egg
creating build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/sp_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/diag_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/reduction.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/__init__.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/elementwise_op_sp.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
creating build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/edge_coarsening.py -> build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/fps.py -> build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/capi.py ->
build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-3.7/dgl/geometry/__init__.py -> build/bdist.linux-x86_64/egg/dgl/geometry creating build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/function/base.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/function/message.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/function/reducer.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/function/__init__.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-3.7/dgl/partition.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/nn creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/transe.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/edgepred.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/transr.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/sparse_emb.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/linear.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/agnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/dgnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/grouprevres.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/twirlsconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/egatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gcn2conv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/dotgatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/cfconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gmmconv.py -> 
build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densesageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gineconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/nnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatv2conv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/hgtconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/pnaconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatedgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/atomicconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/tagconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densegraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/egnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain/gnnexplainer.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/factory.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/agnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/graphconv.py 
-> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gmmconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densesageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/nnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gatedgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/tagconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densegraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/chebconv.py -> 
build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/nn/functional copying build/lib.linux-x86_64-3.7/dgl/nn/functional/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/functional copying build/lib.linux-x86_64-3.7/dgl/core.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/subgraph.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/contrib/dis_kvstore.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/contrib/unified_tensor.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/contrib/graph_store.py -> build/bdist.linux-x86_64/egg/dgl/contrib creating build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/sampler.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/dis_sampler.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling creating build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-3.7/dgl/contrib/data/knowledge_graph.py -> build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-3.7/dgl/contrib/data/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-3.7/dgl/contrib/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-3.7/dgl/traversal.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/checks.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/exception.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/pin_memory.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/internal.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/filter.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/data.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/__init__.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-3.7/dgl/utils/shared_mem.py -> build/bdist.linux-x86_64/egg/dgl/utils creating build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-3.7/dgl/multiprocessing/pytorch.py -> build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-3.7/dgl/multiprocessing/__init__.py -> build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-3.7/dgl/base.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/optim creating build/bdist.linux-x86_64/egg/dgl/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/optim/pytorch/__init__.py -> 
build/bdist.linux-x86_64/egg/dgl/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/optim/pytorch/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/optim/pytorch creating build/bdist.linux-x86_64/egg/dgl/optim/mxnet copying build/lib.linux-x86_64-3.7/dgl/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/mxnet copying build/lib.linux-x86_64-3.7/dgl/optim/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim creating build/bdist.linux-x86_64/egg/dgl/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/heterograph_index.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/sparse.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/global_config.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/base.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/graphsaint.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/shadow.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/negative_sampler.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/__init__.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/cluster_gcn.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/neighbor_sampler.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-3.7/dgl/dataloading/dist_dataloader.py -> build/bdist.linux-x86_64/egg/dgl/dataloading creating build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/neighbor.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading creating build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying build/lib.linux-x86_64-3.7/dgl/_dataloading/shadow.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/negative_sampler.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_dataloading/cluster_gcn.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-3.7/dgl/_api_internal.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/heterograph.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/backend copying build/lib.linux-x86_64-3.7/dgl/backend/backend.py -> build/bdist.linux-x86_64/egg/dgl/backend creating build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying 
build/lib.linux-x86_64-3.7/dgl/backend/pytorch/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch creating build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-3.7/dgl/backend/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend creating build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-3.7/dgl/backend/set_default_backend.py -> build/bdist.linux-x86_64/egg/dgl/backend copying build/lib.linux-x86_64-3.7/dgl/network.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/edge_softmax.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/sddmm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/gather_mm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/segment.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/__init__.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/ops/spmm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-3.7/dgl/logging.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/graph_index.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-3.7/dgl/cuda/nccl.py -> build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-3.7/dgl/cuda/__init__.py -> build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-3.7/dgl/udf.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/pytorch_tensor.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/base.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/tensor.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/__init__.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/storages/numpy.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-3.7/dgl/ndarray.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/init.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/view.py -> build/bdist.linux-x86_64/egg/dgl creating 
build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/neighbor.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/pinsage.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/negative.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/randomwalks.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/utils.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/node2vec_randomwalk.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/sampling/__init__.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-3.7/dgl/generators.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/qm7b.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/graph_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/dgl_dataset.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/csv_dataset_base.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/knowledge_graph.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/qm9_edge.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/gnn_benchmark.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/qm9.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/icews18.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/gdelt.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/utils.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/sbm.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/csv_dataset.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/karate.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/citation_graph.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/rdf.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/bitcoinotc.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/ppi.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/tensor_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/flickr.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/wikics.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/heterograph_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/tu.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/__init__.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/gindt.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/tree.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/synthetic.py -> build/bdist.linux-x86_64/egg/dgl/data 
copying build/lib.linux-x86_64-3.7/dgl/data/adapter.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/fakenews.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/fraud.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/minigc.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/yelp.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/data/reddit.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-3.7/dgl/convert.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/container.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/merge.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/readout.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/__init__.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_graph.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/graph_partition_book.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/partition.py -> build/bdist.linux-x86_64/egg/dgl/distributed creating build/bdist.linux-x86_64/egg/dgl/distributed/nn creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch/sparse_emb.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/distributed/optim creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/utils.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow copying build/lib.linux-x86_64-3.7/dgl/distributed/standalone_kvstore.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying 
build/lib.linux-x86_64-3.7/dgl/distributed/graph_services.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/role.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/constants.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/rpc_server.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/kvstore.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_context.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/rpc.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/rpc_client.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/id_map.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/server_state.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/shared_mem_utils.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_tensor.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_dataloader.py -> build/bdist.linux-x86_64/egg/dgl/distributed creating build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/nodeflow.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/udf.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/view.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/kernel.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/spmv.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/degree_bucketing.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/scheduler.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/adapter.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/runtime.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/program.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/executor.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/var.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/registry.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir 
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-3.7/dgl/_deprecate/graph.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/_deprecate/frame.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-3.7/dgl/propagate.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/batch.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-3.7/dgl/transforms/functional.py -> build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-3.7/dgl/transforms/__init__.py -> build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-3.7/dgl/transforms/module.py -> build/bdist.linux-x86_64/egg/dgl/transforms creating build/bdist.linux-x86_64/egg/dgl/distgnn creating build/bdist.linux-x86_64/egg/dgl/distgnn/partition copying build/lib.linux-x86_64-3.7/dgl/distgnn/partition/libra_partition.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/partition copying build/lib.linux-x86_64-3.7/dgl/distgnn/partition/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/partition creating build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-3.7/dgl/distgnn/tools/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-3.7/dgl/distgnn/tools/tools.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-3.7/dgl/distgnn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn creating build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/runtime_ctypes.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/function.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/libinfo.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/streams.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/base.py -> build/bdist.linux-x86_64/egg/dgl/_ffi creating build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/function.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/types.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/ndarray.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/object.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-3.7/dgl/_ffi/ndarray.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/object_generic.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-3.7/dgl/_ffi/object.py -> build/bdist.linux-x86_64/egg/dgl/_ffi creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3 copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3 copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3 creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2 copying 
build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2 creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cython copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cython/core.cpp -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cython copying build/lib.linux-x86_64-3.7/dgl/frame.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-3.7/dgl/random.py -> build/bdist.linux-x86_64/egg/dgl byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/sp_matrix.py to sp_matrix.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/diag_matrix.py to diag_matrix.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/reduction.py to reduction.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/elementwise_op_sp.py to elementwise_op_sp.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/edge_coarsening.py to edge_coarsening.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/fps.py to fps.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/capi.py to capi.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/message.py to message.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/reducer.py to reducer.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/partition.py to partition.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/glob.py to glob.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transe.py to transe.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/edgepred.py to edgepred.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transr.py to transr.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/hetero.py to hetero.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/sparse_emb.py to sparse_emb.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/linear.py to linear.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/agnnconv.py to agnnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/relgraphconv.py to relgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/appnpconv.py to appnpconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dgnconv.py to dgnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/grouprevres.py to grouprevres.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/twirlsconv.py to twirlsconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egatconv.py to egatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/graphconv.py to graphconv.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densechebconv.py to densechebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gcn2conv.py to gcn2conv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/ginconv.py to ginconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dotgatconv.py to dotgatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/cfconv.py to cfconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gmmconv.py to gmmconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sageconv.py to sageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densesageconv.py to densesageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gineconv.py to gineconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/edgeconv.py to edgeconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/nnconv.py to nnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatv2conv.py to gatv2conv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/hgtconv.py to hgtconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/pnaconv.py to pnaconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatedgraphconv.py to gatedgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/atomicconv.py to atomicconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/tagconv.py to tagconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densegraphconv.py to densegraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egnnconv.py to egnnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/chebconv.py to chebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sgconv.py to sgconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatconv.py to gatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/gnnexplainer.py to gnnexplainer.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/factory.py to factory.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/softmax.py to softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/glob.py to glob.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/hetero.py to hetero.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/agnnconv.py to agnnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/relgraphconv.py to relgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/appnpconv.py to appnpconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/graphconv.py to 
graphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densechebconv.py to densechebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/ginconv.py to ginconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gmmconv.py to gmmconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sageconv.py to sageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densesageconv.py to densesageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/edgeconv.py to edgeconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/nnconv.py to nnconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatedgraphconv.py to gatedgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/tagconv.py to tagconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densegraphconv.py to densegraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/chebconv.py to chebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sgconv.py to sgconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatconv.py to gatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/softmax.py to softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/glob.py to glob.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/hetero.py to hetero.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/relgraphconv.py to relgraphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/appnpconv.py to appnpconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/graphconv.py to graphconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/densechebconv.py to densechebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/ginconv.py to ginconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sageconv.py to sageconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/edgeconv.py to edgeconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/chebconv.py to chebconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sgconv.py to sgconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/gatconv.py to gatconv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/softmax.py to softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/functional/__init__.py to __init__.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/core.py to core.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/subgraph.py to subgraph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/dis_kvstore.py to dis_kvstore.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/unified_tensor.py to unified_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/graph_store.py to graph_store.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/sampler.py to sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/dis_sampler.py to dis_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/traversal.py to traversal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/checks.py to checks.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/exception.py to exception.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/pin_memory.py to pin_memory.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/internal.py to internal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/filter.py to filter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/data.py to data.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/shared_mem.py to shared_mem.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/pytorch.py to pytorch.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph_index.py to heterograph_index.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/global_config.py to global_config.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/graphsaint.py to graphsaint.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/shadow.py to shadow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/dataloading/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/neighbor_sampler.py to neighbor_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dist_dataloader.py to dist_dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/neighbor.py to neighbor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/shadow.py to shadow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_api_internal.py to _api_internal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph.py to heterograph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/backend.py to backend.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/set_default_backend.py to set_default_backend.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/network.py to network.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/edge_softmax.py to edge_softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/sddmm.py to sddmm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/gather_mm.py to gather_mm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/segment.py to segment.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/spmm.py to spmm.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/logging.py to logging.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/graph_index.py to graph_index.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/nccl.py to nccl.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/udf.py to udf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/pytorch_tensor.py to pytorch_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/numpy.py to numpy.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/init.py to init.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/view.py to view.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/neighbor.py to neighbor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/pinsage.py to pinsage.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/negative.py to negative.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/randomwalks.py to randomwalks.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/node2vec_randomwalk.py to node2vec_randomwalk.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/generators.py to generators.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm7b.py to qm7b.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/graph_serialize.py to graph_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/dgl_dataset.py to dgl_dataset.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset_base.py to csv_dataset_base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9_edge.py to qm9_edge.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gnn_benchmark.py to gnn_benchmark.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9.py to qm9.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/icews18.py to icews18.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gdelt.py to gdelt.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/sbm.py to sbm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset.py to csv_dataset.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/karate.py to karate.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/citation_graph.py to citation_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/rdf.py to rdf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/bitcoinotc.py to bitcoinotc.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/data/ppi.py to ppi.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tensor_serialize.py to tensor_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/flickr.py to flickr.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/wikics.py to wikics.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/heterograph_serialize.py to heterograph_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tu.py to tu.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gindt.py to gindt.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tree.py to tree.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/synthetic.py to synthetic.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/adapter.py to adapter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fakenews.py to fakenews.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fraud.py to fraud.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/minigc.py to minigc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/yelp.py to yelp.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/reddit.py to reddit.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/convert.py to convert.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/container.py to container.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/merge.py to merge.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/readout.py to readout.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_graph.py to dist_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_partition_book.py to graph_partition_book.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/partition.py to partition.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/sparse_emb.py to sparse_emb.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/standalone_kvstore.py to standalone_kvstore.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/distributed/graph_services.py to graph_services.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/role.py to role.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/constants.py to constants.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_server.py to rpc_server.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/kvstore.py to kvstore.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_context.py to dist_context.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc.py to rpc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_client.py to rpc_client.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/id_map.py to id_map.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/server_state.py to server_state.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/shared_mem_utils.py to shared_mem_utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_tensor.py to dist_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_dataloader.py to dist_dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/nodeflow.py to nodeflow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/udf.py to udf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/view.py to view.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/kernel.py to kernel.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/spmv.py to spmv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/degree_bucketing.py to degree_bucketing.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/scheduler.py to scheduler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/adapter.py to adapter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/runtime.py to runtime.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/program.py to program.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/executor.py to executor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/var.py to var.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/registry.py to registry.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/graph.py to graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/frame.py to frame.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/propagate.py to propagate.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/batch.py to batch.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/functional.py to functional.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/transforms/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/module.py to module.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/libra_partition.py to libra_partition.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/tools.py to tools.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/runtime_ctypes.py to runtime_ctypes.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/function.py to function.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/libinfo.py to libinfo.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/streams.py to streams.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/function.py to function.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/types.py to types.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/object.py to object.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object_generic.py to object_generic.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object.py to object.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/frame.py to frame.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/random.py to random.cpython-37.pyc
creating stub loader for dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/core.py to core.cpython-37.pyc
installing package data to build/bdist.linux-x86_64/egg
running install_data
copying ../build/libdgl.so -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/tensoradapter
creating build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch
copying ../build/tensoradapter/pytorch/libtensoradapter_pytorch_1.9.0.so -> build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch
creating build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/not-zip-safe -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
writing build/bdist.linux-x86_64/egg/EGG-INFO/native_libs.txt
creating dist
creating 'dist/dgl-0.9-py3.7-linux-x86_64.egg' and adding 'build/bdist.linux-x86_64/egg' to it
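The 'not-zip-safe' flag only means the egg gets extracted at install time; the .egg file written above is still an ordinary zip archive, so its contents can be inspected directly. A minimal sketch, assuming it runs next to the dist/ directory from the log, to confirm that libdgl.so, the tensoradapter library, and the EGG-INFO metadata were bundled:

    # List native libraries and metadata inside the egg produced above.
    # Assumes the working directory contains the dist/ folder from the log.
    import zipfile

    with zipfile.ZipFile("dist/dgl-0.9-py3.7-linux-x86_64.egg") as egg:
        for name in egg.namelist():
            if name.endswith(".so") or name.startswith("EGG-INFO/"):
                print(name)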
removing 'build/bdist.linux-x86_64/egg' (and everything under it)
Processing dgl-0.9-py3.7-linux-x86_64.egg
creating /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg
Extracting dgl-0.9-py3.7-linux-x86_64.egg to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Adding dgl 0.9 to easy-install.pth file
Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg
Processing dependencies for dgl==0.9
Searching for psutil>=5.8.0
Reading https://pypi.org/simple/psutil/
Downloading https://files.pythonhosted.org/packages/3d/73/d8c87b5612c58d1e6c6d91997c1590771d34e4ee27d9c11eb1e64ecbf365/psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=4fb54941aac044a61db9d8eb56fc5bee207db3bc58645d657249030e15ba3727
Best match: psutil 5.9.2
Processing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl
Installing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Adding psutil 5.9.2 to easy-install.pth file
Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/psutil-5.9.2-py3.7-linux-x86_64.egg
Searching for tqdm==4.64.0
Best match: tqdm 4.64.0
Adding tqdm 4.64.0 to easy-install.pth file
Installing tqdm script to /opt/conda/envs/pytorch-ci/bin
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for requests==2.28.1
Best match: requests 2.28.1
Adding requests 2.28.1 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for networkx==2.6.3
Best match: networkx 2.6.3
Adding networkx 2.6.3 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for scipy==1.7.3
Best match: scipy 1.7.3
Adding scipy 1.7.3 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for numpy==1.21.6
Best match: numpy 1.21.6
Adding numpy 1.21.6 to easy-install.pth file
Installing f2py script to /opt/conda/envs/pytorch-ci/bin
Installing f2py3 script to /opt/conda/envs/pytorch-ci/bin
Installing f2py3.7 script to /opt/conda/envs/pytorch-ci/bin
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for certifi==2022.6.15
Best match: certifi 2022.6.15
Adding certifi 2022.6.15 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for urllib3==1.26.11
Best match: urllib3 1.26.11
Adding urllib3 1.26.11 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for idna==3.3
Best match: idna 3.3
Adding idna 3.3 to easy-install.pth file
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Searching for charset-normalizer==2.1.0
Best match: charset-normalizer 2.1.0
Adding charset-normalizer 2.1.0 to easy-install.pth file
Installing normalizer script to /opt/conda/envs/pytorch-ci/bin
Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages
Finished processing dependencies for dgl==0.9
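The resolver behind these lines matches each requirement specifier against the versions it can see: psutil>=5.8.0 had to be downloaded (best match 5.9.2), while the pinned == requirements were already satisfied by the environment. A minimal sketch of that matching, using the packaging library as a stand-in (an assumption; easy_install does this through its own pkg_resources machinery):

    # How "psutil>=5.8.0" selects 5.9.2 as the best match.
    # The candidate list is hypothetical; PyPI supplies the real one.
    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    spec = SpecifierSet(">=5.8.0")
    candidates = ["5.7.3", "5.8.0", "5.9.2"]
    matches = [v for v in candidates if Version(v) in spec]
    print(max(matches, key=Version))  # -> 5.9.2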
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release!
File: /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/_ffi/_cython/core.pyx
  tree = Parsing.p_module(s, pxd, full_module_name)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/command/install.py:37: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.
  setuptools.SetuptoolsDeprecationWarning,
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/command/easy_install.py:147: EasyInstallDeprecationWarning: easy_install command is deprecated. Use build and pip and other standards-based tools.
  EasyInstallDeprecationWarning,
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: is an invalid version and will not be supported in a future release
  PkgResourcesDeprecationWarning,
[1/1] Cythonizing dgl/_ffi/_cython/core.pyx
running build_ext
building 'dgl._ffi._cy3.core' extension
gcc -pthread -B /opt/conda/envs/pytorch-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/pytorch-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o
cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++
g++ -pthread -B /opt/conda/envs/pytorch-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/pytorch-ci/compiler_compat -L/opt/conda/envs/pytorch-ci/lib -Wl,-rpath=/opt/conda/envs/pytorch-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/pytorch-ci/compiler_compat/ld: warning: /opt/conda/envs/pytorch-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so -> dgl/_ffi/_cy3
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release!
File: /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/_ffi/_cython/core.pyx
  tree = Parsing.p_module(s, pxd, full_module_name)
WARNING: Skipping dgl as it is not installed.
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
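The deprecation warnings and pip's root-user warning above all point the same way: build a wheel with PEP 517 tooling and install it with pip inside a virtual environment, instead of setup.py install / easy_install as root. A minimal sketch of that replacement flow, assuming the build package is installable and the script runs from the python/ directory of the checkout (neither detail is taken from the CI scripts themselves):

    # Deprecated flow: "python setup.py install" (as in the log above).
    # Replacement: build a wheel, then pip-install it inside a venv.
    import glob
    import subprocess
    import venv

    venv.create(".ci-venv", with_pip=True)  # avoids the 'root' user warning
    py = ".ci-venv/bin/python"
    subprocess.check_call([py, "-m", "pip", "install", "build"])
    subprocess.check_call([py, "-m", "build", "--wheel"])
    wheel = glob.glob("dist/*.whl")[0]
    subprocess.check_call([py, "-m", "pip", "install", wheel])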
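The repeated FutureWarning is Cython noting that core.pyx declares no language level, so it falls back to Python 2 semantics. A minimal sketch of the fix, not DGL's actual setup.py: pass the directive to cythonize(), or equivalently put "# cython: language_level=3" on the first line of the .pyx file:

    # Pin the language level so Cython stops defaulting to Py2 semantics.
    from Cython.Build import cythonize

    ext_modules = cythonize(
        "dgl/_ffi/_cython/core.pyx",
        compiler_directives={"language_level": 3},
    )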
[1/1] Cythonizing dgl/_ffi/_cython/core.pyx
running install
running bdist_egg
running egg_info
creating dgl.egg-info
writing dgl.egg-info/PKG-INFO
writing dependency_links to dgl.egg-info/dependency_links.txt
writing requirements to dgl.egg-info/requires.txt
writing top-level names to dgl.egg-info/top_level.txt
writing manifest file 'dgl.egg-info/SOURCES.txt'
reading manifest file 'dgl.egg-info/SOURCES.txt'
writing manifest file 'dgl.egg-info/SOURCES.txt'
installing library code to build/bdist.linux-x86_64/egg
running install_lib
running build_py
creating build creating build/lib.linux-x86_64-3.7 creating build/lib.linux-x86_64-3.7/dgl copying dgl/partition.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/core.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/subgraph.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/traversal.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/base.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/heterograph_index.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/sparse.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/global_config.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/_api_internal.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/heterograph.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/network.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/logging.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/graph_index.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/udf.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/ndarray.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/init.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/view.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/generators.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/convert.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/container.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/merge.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/readout.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/__init__.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/propagate.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/batch.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/frame.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/random.py -> build/lib.linux-x86_64-3.7/dgl creating build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/sp_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/diag_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/reduction.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/__init__.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/elementwise_op_sp.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse creating build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/edge_coarsening.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/fps.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/capi.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/__init__.py -> build/lib.linux-x86_64-3.7/dgl/geometry creating build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/base.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/message.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/reducer.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/__init__.py -> build/lib.linux-x86_64-3.7/dgl/function creating build/lib.linux-x86_64-3.7/dgl/nn copying dgl/nn/__init__.py ->
build/lib.linux-x86_64-3.7/dgl/nn
creating build/lib.linux-x86_64-3.7/dgl/contrib
copying dgl/contrib/{dis_kvstore,unified_tensor,graph_store,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/contrib
creating build/lib.linux-x86_64-3.7/dgl/utils
copying dgl/utils/{checks,exception,pin_memory,internal,filter,data,__init__,shared_mem}.py -> build/lib.linux-x86_64-3.7/dgl/utils
creating build/lib.linux-x86_64-3.7/dgl/multiprocessing
copying dgl/multiprocessing/{pytorch,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/multiprocessing
creating build/lib.linux-x86_64-3.7/dgl/optim
copying dgl/optim/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim
creating build/lib.linux-x86_64-3.7/dgl/dataloading
copying dgl/dataloading/{base,dataloader,graphsaint,shadow,negative_sampler,__init__,cluster_gcn,neighbor_sampler,dist_dataloader}.py -> build/lib.linux-x86_64-3.7/dgl/dataloading
creating build/lib.linux-x86_64-3.7/dgl/_dataloading
copying dgl/_dataloading/{neighbor,dataloader,shadow,negative_sampler,__init__,cluster_gcn}.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading
creating build/lib.linux-x86_64-3.7/dgl/backend
copying dgl/backend/{backend,__init__,set_default_backend}.py -> build/lib.linux-x86_64-3.7/dgl/backend
creating build/lib.linux-x86_64-3.7/dgl/ops
copying dgl/ops/{edge_softmax,sddmm,gather_mm,segment,__init__,spmm}.py -> build/lib.linux-x86_64-3.7/dgl/ops
creating build/lib.linux-x86_64-3.7/dgl/cuda
copying dgl/cuda/{nccl,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/cuda
creating build/lib.linux-x86_64-3.7/dgl/storages
copying dgl/storages/{pytorch_tensor,base,tensor,__init__,numpy}.py -> build/lib.linux-x86_64-3.7/dgl/storages
creating build/lib.linux-x86_64-3.7/dgl/sampling
copying dgl/sampling/{neighbor,pinsage,negative,randomwalks,utils,node2vec_randomwalk,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/sampling
creating build/lib.linux-x86_64-3.7/dgl/data
copying dgl/data/{qm7b,graph_serialize,dgl_dataset,csv_dataset_base,knowledge_graph,qm9_edge,gnn_benchmark,qm9,icews18,gdelt,utils,sbm,csv_dataset,karate,citation_graph,rdf,bitcoinotc,ppi,tensor_serialize,flickr,wikics,heterograph_serialize,tu,__init__,gindt,tree,synthetic,adapter,fakenews,fraud,minigc,yelp,reddit}.py -> build/lib.linux-x86_64-3.7/dgl/data
creating build/lib.linux-x86_64-3.7/dgl/distributed
copying dgl/distributed/{dist_graph,graph_partition_book,partition,standalone_kvstore,graph_services,role,constants,rpc_server,kvstore,dist_context,rpc,rpc_client,id_map,server_state,__init__,shared_mem_utils,dist_tensor,dist_dataloader}.py -> build/lib.linux-x86_64-3.7/dgl/distributed
creating build/lib.linux-x86_64-3.7/dgl/_deprecate
copying dgl/_deprecate/{nodeflow,udf,view,kernel,__init__,graph,frame}.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate
creating build/lib.linux-x86_64-3.7/dgl/transforms
copying dgl/transforms/{functional,__init__,module}.py -> build/lib.linux-x86_64-3.7/dgl/transforms
creating build/lib.linux-x86_64-3.7/dgl/distgnn
copying dgl/distgnn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn
creating build/lib.linux-x86_64-3.7/dgl/_ffi
copying dgl/_ffi/{runtime_ctypes,function,libinfo,streams,base,ndarray,object_generic,__init__,object}.py -> build/lib.linux-x86_64-3.7/dgl/_ffi
creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch
copying dgl/nn/pytorch/{glob,hetero,sparse_emb,linear,utils,__init__,factory,softmax}.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch
creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet
copying dgl/nn/mxnet/{glob,hetero,utils,__init__,softmax}.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet
creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow
copying dgl/nn/tensorflow/{glob,hetero,utils,__init__,softmax}.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/nn/functional
copying dgl/nn/functional/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/functional
creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link
copying dgl/nn/pytorch/link/{transe,edgepred,transr,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link
creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/{agnnconv,relgraphconv,appnpconv,dgnconv,grouprevres,twirlsconv,egatconv,graphconv,densechebconv,gcn2conv,ginconv,dotgatconv,cfconv,gmmconv,sageconv,densesageconv,gineconv,edgeconv,nnconv,__init__,gatv2conv,hgtconv,pnaconv,gatedgraphconv,atomicconv,tagconv,densegraphconv,egnnconv,chebconv,sgconv,gatconv}.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain
copying dgl/nn/pytorch/explain/{gnnexplainer,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain
creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/{agnnconv,relgraphconv,appnpconv,graphconv,densechebconv,ginconv,gmmconv,sageconv,densesageconv,edgeconv,nnconv,__init__,gatedgraphconv,tagconv,densegraphconv,chebconv,sgconv,gatconv}.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/{relgraphconv,appnpconv,graphconv,densechebconv,ginconv,sageconv,edgeconv,__init__,chebconv,sgconv,gatconv}.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
creating build/lib.linux-x86_64-3.7/dgl/contrib/sampling
copying dgl/contrib/sampling/{sampler,dis_sampler,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling
creating build/lib.linux-x86_64-3.7/dgl/contrib/data
copying dgl/contrib/data/{knowledge_graph,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/contrib/data
creating build/lib.linux-x86_64-3.7/dgl/optim/pytorch
copying dgl/optim/pytorch/{__init__,sparse_optim}.py -> build/lib.linux-x86_64-3.7/dgl/optim/pytorch
creating build/lib.linux-x86_64-3.7/dgl/optim/mxnet
copying dgl/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/mxnet
creating build/lib.linux-x86_64-3.7/dgl/optim/tensorflow
copying dgl/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch
copying dgl/_dataloading/pytorch/{dataloader,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch
creating build/lib.linux-x86_64-3.7/dgl/backend/pytorch
copying dgl/backend/pytorch/{sparse,tensor,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch
creating build/lib.linux-x86_64-3.7/dgl/backend/mxnet
copying dgl/backend/mxnet/{sparse,tensor,__init__,sparse_optim}.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet
creating build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
copying dgl/backend/tensorflow/{sparse,tensor,__init__,sparse_optim}.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn
copying dgl/distributed/nn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim
copying dgl/distributed/optim/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch
copying dgl/distributed/nn/pytorch/{sparse_emb,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet
copying dgl/distributed/nn/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow
copying dgl/distributed/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch
copying dgl/distributed/optim/pytorch/{utils,__init__,sparse_optim}.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet
copying dgl/distributed/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow
copying dgl/distributed/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/{spmv,degree_bucketing,__init__,scheduler,adapter,runtime}.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
copying dgl/_deprecate/runtime/ir/{program,executor,var,registry,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
creating build/lib.linux-x86_64-3.7/dgl/distgnn/partition
copying dgl/distgnn/partition/{libra_partition,__init__}.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/partition
creating build/lib.linux-x86_64-3.7/dgl/distgnn/tools
copying dgl/distgnn/tools/{__init__,tools}.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/tools
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
copying dgl/_ffi/_ctypes/{function,types,ndarray,__init__,object}.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3
copying dgl/_ffi/_cy3/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2
copying dgl/_ffi/_cy2/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cython
copying dgl/_ffi/_cython/core.cpp -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cython
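The listing above is setuptools' build_py step: every pure-Python module in the dgl source tree is copied into the platform build directory build/lib.linux-x86_64-3.7, one subdirectory per package. A minimal sketch of how that package list comes about, assuming a conventional setup.py (this is illustrative only, not dmlc/dgl's actual setup.py; the package_data entry is an assumption):

# sketch_setup.py -- illustrative only, not dmlc/dgl's real setup.py
from setuptools import setup, find_packages

setup(
    name="dgl",
    # find_packages() walks the tree for __init__.py files and yields
    # dgl, dgl.utils, dgl.nn.pytorch.conv, ... i.e. exactly the kind of
    # directory list "created" in the build_py output above.
    packages=find_packages(),
    # Non-.py files such as dgl/_ffi/_cython/core.cpp are only copied
    # into build/lib if declared, e.g. via package_data (assumed here).
    package_data={"dgl._ffi._cython": ["core.cpp"]},
)

Running `python sketch_setup.py build` would emit the same style of "creating ... / copying ..." lines for each discovered package.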
running build_ext
building 'dgl._ffi._cy3.core' extension
creating build/temp.linux-x86_64-3.7
creating build/temp.linux-x86_64-3.7/dgl
creating build/temp.linux-x86_64-3.7/dgl/_ffi
creating build/temp.linux-x86_64-3.7/dgl/_ffi/_cython
gcc -pthread -B /opt/conda/envs/mxnet-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/mxnet-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o
cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++
g++ -pthread -B /opt/conda/envs/mxnet-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/mxnet-ci/compiler_compat -L/opt/conda/envs/mxnet-ci/lib -Wl,-rpath=/opt/conda/envs/mxnet-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
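This build_ext step compiles the Cython-generated core.cpp into the extension module dgl._ffi._cy3.core and links it against the native libdgl built earlier (the -ldgl flag). Both warnings are benign: distutils passes its C flag set, including -Wstrict-prototypes, to a C++ compile, which triggers the cc1plus note, and the GNU_PROPERTY_TYPE lines typically mean the older compiler_compat linker does not recognize the newer GNU property notes in the conda toolchain's libraries. A hedged sketch of an equivalent Extension declaration, with include and library paths transcribed from the logged command line (the real setup.py may declare this differently, e.g. via cythonize):

# sketch of the extension build -- flags inferred from the gcc/g++ lines above
from setuptools import setup, Extension

core_ext = Extension(
    "dgl._ffi._cy3.core",
    sources=["dgl/_ffi/_cython/core.cpp"],   # Cython output, compiled as C++
    include_dirs=[
        "../include/",                        # the -I flags from the log
        "../third_party/dmlc-core/include",
        "../third_party/dlpack/include",
    ],
    library_dirs=["dgl", "../build/Release", "../build"],  # the -L flags
    libraries=["dgl"],                        # emits -ldgl at link time
    language="c++",
)

setup(name="dgl", ext_modules=[core_ext])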
creating build/bdist.linux-x86_64
creating build/bdist.linux-x86_64/egg
creating build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/{sp_matrix,diag_matrix,reduction,__init__,elementwise_op_sp}.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
creating build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/{edge_coarsening,fps,capi,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/geometry
creating build/bdist.linux-x86_64/egg/dgl/function
copying build/lib.linux-x86_64-3.7/dgl/function/{base,message,reducer,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/function
copying build/lib.linux-x86_64-3.7/dgl/partition.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/nn
creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/{transe,edgepred,transr,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/{hetero,sparse_emb,linear,utils}.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/{agnnconv,relgraphconv,appnpconv,dgnconv,grouprevres,twirlsconv,egatconv,graphconv,densechebconv,gcn2conv,ginconv,dotgatconv,cfconv,gmmconv,sageconv,densesageconv,gineconv,edgeconv,nnconv,__init__,gatv2conv,hgtconv,pnaconv,gatedgraphconv,atomicconv,tagconv,densegraphconv,egnnconv,chebconv,sgconv,gatconv}.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain/{gnnexplainer,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/{factory,softmax}.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/{glob,hetero,utils}.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet
creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/{agnnconv,relgraphconv,appnpconv,graphconv,densechebconv,ginconv,gmmconv,sageconv,densesageconv,edgeconv,nnconv,__init__,gatedgraphconv,tagconv,densegraphconv,chebconv,sgconv,gatconv}.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/{__init__,softmax}.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet
copying build/lib.linux-x86_64-3.7/dgl/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn
creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/{glob,hetero,utils}.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow
creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/{relgraphconv,appnpconv,graphconv,densechebconv,ginconv,sageconv,edgeconv,__init__,chebconv,sgconv,gatconv}.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/{__init__,softmax}.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow
creating build/bdist.linux-x86_64/egg/dgl/nn/functional
copying build/lib.linux-x86_64-3.7/dgl/nn/functional/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/functional
copying build/lib.linux-x86_64-3.7/dgl/{core,subgraph}.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/contrib
copying build/lib.linux-x86_64-3.7/dgl/contrib/{dis_kvstore,unified_tensor,graph_store}.py -> build/bdist.linux-x86_64/egg/dgl/contrib
creating build/bdist.linux-x86_64/egg/dgl/contrib/sampling
copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/{sampler,dis_sampler,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling
creating build/bdist.linux-x86_64/egg/dgl/contrib/data
copying build/lib.linux-x86_64-3.7/dgl/contrib/data/{knowledge_graph,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/contrib/data
copying build/lib.linux-x86_64-3.7/dgl/contrib/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib
copying build/lib.linux-x86_64-3.7/dgl/traversal.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/utils
copying build/lib.linux-x86_64-3.7/dgl/utils/{checks,exception,pin_memory,internal,filter,data,__init__,shared_mem}.py -> build/bdist.linux-x86_64/egg/dgl/utils
creating build/bdist.linux-x86_64/egg/dgl/multiprocessing
copying build/lib.linux-x86_64-3.7/dgl/multiprocessing/{pytorch,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/multiprocessing
copying build/lib.linux-x86_64-3.7/dgl/base.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/optim
creating build/bdist.linux-x86_64/egg/dgl/optim/pytorch
copying build/lib.linux-x86_64-3.7/dgl/optim/pytorch/{__init__,sparse_optim}.py -> build/bdist.linux-x86_64/egg/dgl/optim/pytorch
creating build/bdist.linux-x86_64/egg/dgl/optim/mxnet
copying build/lib.linux-x86_64-3.7/dgl/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/mxnet
copying build/lib.linux-x86_64-3.7/dgl/optim/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim
creating build/bdist.linux-x86_64/egg/dgl/optim/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/{heterograph_index,sparse,global_config}.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/dataloading
copying build/lib.linux-x86_64-3.7/dgl/dataloading/{base,dataloader,graphsaint,shadow,negative_sampler,__init__,cluster_gcn,neighbor_sampler,dist_dataloader}.py -> build/bdist.linux-x86_64/egg/dgl/dataloading
creating build/bdist.linux-x86_64/egg/dgl/_dataloading
copying build/lib.linux-x86_64-3.7/dgl/_dataloading/{neighbor,dataloader}.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading
creating build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch
copying build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch/{dataloader,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch
copying build/lib.linux-x86_64-3.7/dgl/_dataloading/{shadow,negative_sampler,__init__,cluster_gcn}.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading
copying build/lib.linux-x86_64-3.7/dgl/{_api_internal,heterograph}.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/backend
copying build/lib.linux-x86_64-3.7/dgl/backend/backend.py -> build/bdist.linux-x86_64/egg/dgl/backend
creating build/bdist.linux-x86_64/egg/dgl/backend/pytorch
copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/{sparse,tensor,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch
creating build/bdist.linux-x86_64/egg/dgl/backend/mxnet
copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/{sparse,tensor,__init__,sparse_optim}.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet
copying build/lib.linux-x86_64-3.7/dgl/backend/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend
creating build/bdist.linux-x86_64/egg/dgl/backend/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/{sparse,tensor,__init__,sparse_optim}.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/backend/set_default_backend.py -> build/bdist.linux-x86_64/egg/dgl/backend
copying build/lib.linux-x86_64-3.7/dgl/network.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/ops
copying build/lib.linux-x86_64-3.7/dgl/ops/{edge_softmax,sddmm,gather_mm,segment,__init__,spmm}.py -> build/bdist.linux-x86_64/egg/dgl/ops
copying build/lib.linux-x86_64-3.7/dgl/{logging,graph_index}.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/cuda
copying build/lib.linux-x86_64-3.7/dgl/cuda/{nccl,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/cuda
copying build/lib.linux-x86_64-3.7/dgl/udf.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/storages
copying build/lib.linux-x86_64-3.7/dgl/storages/{pytorch_tensor,base,tensor,__init__,numpy}.py -> build/bdist.linux-x86_64/egg/dgl/storages
copying build/lib.linux-x86_64-3.7/dgl/{ndarray,init,view}.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/sampling
copying build/lib.linux-x86_64-3.7/dgl/sampling/{neighbor,pinsage,negative,randomwalks,utils,node2vec_randomwalk,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/sampling
copying build/lib.linux-x86_64-3.7/dgl/generators.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/{qm7b,graph_serialize,dgl_dataset,csv_dataset_base,knowledge_graph,qm9_edge,gnn_benchmark,qm9,icews18,gdelt,utils,sbm,csv_dataset,karate,citation_graph,rdf,bitcoinotc,ppi,tensor_serialize,flickr,wikics,heterograph_serialize,tu,__init__,gindt,tree,synthetic,adapter,fakenews,fraud,minigc,yelp,reddit}.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/{convert,container,merge,readout,__init__}.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/{dist_graph,graph_partition_book,partition}.py -> build/bdist.linux-x86_64/egg/dgl/distributed
creating build/bdist.linux-x86_64/egg/dgl/distributed/nn
creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch
copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch/{sparse_emb,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch
creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet
copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet
copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn
creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow
creating build/bdist.linux-x86_64/egg/dgl/distributed/optim
creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch
copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/{utils,__init__,sparse_optim}.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch
creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet
copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet
copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim
creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/distributed/{standalone_kvstore,graph_services,role,constants,rpc_server,kvstore,dist_context,rpc,rpc_client,id_map,server_state,__init__,shared_mem_utils,dist_tensor,dist_dataloader}.py -> build/bdist.linux-x86_64/egg/dgl/distributed
creating build/bdist.linux-x86_64/egg/dgl/_deprecate
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/{nodeflow,udf,view,kernel,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate
creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/{spmv,degree_bucketing,__init__,scheduler,adapter,runtime}.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime
creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/{program,executor,var,registry,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/{graph,frame}.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate
copying build/lib.linux-x86_64-3.7/dgl/{propagate,batch}.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/transforms
copying build/lib.linux-x86_64-3.7/dgl/transforms/{functional,__init__,module}.py -> build/bdist.linux-x86_64/egg/dgl/transforms
creating build/bdist.linux-x86_64/egg/dgl/distgnn
creating build/bdist.linux-x86_64/egg/dgl/distgnn/partition
copying build/lib.linux-x86_64-3.7/dgl/distgnn/partition/{libra_partition,__init__}.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/partition
creating build/bdist.linux-x86_64/egg/dgl/distgnn/tools
copying build/lib.linux-x86_64-3.7/dgl/distgnn/tools/{__init__,tools}.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/tools
copying build/lib.linux-x86_64-3.7/dgl/distgnn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn
creating build/bdist.linux-x86_64/egg/dgl/_ffi
copying build/lib.linux-x86_64-3.7/dgl/_ffi/{runtime_ctypes,function,libinfo,streams,base}.py -> build/bdist.linux-x86_64/egg/dgl/_ffi
creating build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/{function,types,ndarray,__init__,object}.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes
copying build/lib.linux-x86_64-3.7/dgl/_ffi/{ndarray,object_generic,__init__,object}.py -> build/bdist.linux-x86_64/egg/dgl/_ffi
creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3
creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2
creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cython
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cython/core.cpp -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cython
copying build/lib.linux-x86_64-3.7/dgl/frame.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/random.py -> build/bdist.linux-x86_64/egg/dgl
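With the egg tree staged under build/bdist.linux-x86_64/egg, bdist_egg next byte-compiles every copied module to CPython 3.7 bytecode, which is the "byte-compiling ... to *.cpython-37.pyc" pass that follows. The same pass can be reproduced with the standard library; a small sketch against the staged directory from this log (illustrative only, the options bdist_egg uses internally may differ):

# sketch: reproduce the byte-compiling pass below with the stdlib
import compileall

# Walks the staged tree and compiles each foo.py to its cached
# __pycache__/foo.cpython-37.pyc under CPython 3.7, matching the
# "byte-compiling ... to foo.cpython-37.pyc" names in the log.
ok = compileall.compile_dir("build/bdist.linux-x86_64/egg/dgl", quiet=1)
print("all modules compiled:", bool(ok))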
byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/{sp_matrix,diag_matrix,reduction,__init__,elementwise_op_sp}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/{edge_coarsening,fps,capi,__init__}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/function/{base,message,reducer,__init__}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/partition.py to partition.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/glob.py to glob.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/{transe,edgepred,transr,__init__}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/{hetero,sparse_emb,linear,utils}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/{agnnconv,relgraphconv,appnpconv,dgnconv,grouprevres,twirlsconv,egatconv,graphconv,densechebconv,gcn2conv,ginconv,dotgatconv,cfconv,gmmconv,sageconv,densesageconv,gineconv,edgeconv,nnconv,__init__,gatv2conv,hgtconv,pnaconv,gatedgraphconv,atomicconv,tagconv,densegraphconv,egnnconv,chebconv,sgconv,gatconv}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/{gnnexplainer,__init__}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/{factory,softmax}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/{glob,hetero,utils}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/{agnnconv,relgraphconv,appnpconv,graphconv,densechebconv,ginconv,gmmconv,sageconv,densesageconv,edgeconv,nnconv,__init__,gatedgraphconv,tagconv,densegraphconv,chebconv,sgconv,gatconv}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/{__init__,softmax}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/{glob,hetero,utils}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/{relgraphconv,appnpconv,graphconv,densechebconv,ginconv,sageconv,edgeconv,__init__,chebconv,sgconv,gatconv}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/{__init__,softmax}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/functional/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/{core,subgraph}.py to *.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/dis_kvstore.py to dis_kvstore.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/unified_tensor.py to
unified_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/graph_store.py to graph_store.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/sampler.py to sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/dis_sampler.py to dis_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/traversal.py to traversal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/checks.py to checks.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/exception.py to exception.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/pin_memory.py to pin_memory.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/internal.py to internal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/filter.py to filter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/data.py to data.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/shared_mem.py to shared_mem.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/pytorch.py to pytorch.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph_index.py to heterograph_index.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/global_config.py to global_config.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/graphsaint.py to graphsaint.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/shadow.py to shadow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/neighbor_sampler.py to neighbor_sampler.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/dataloading/dist_dataloader.py to dist_dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/neighbor.py to neighbor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/dataloader.py to dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/shadow.py to shadow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_api_internal.py to _api_internal.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph.py to heterograph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/backend.py to backend.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse.py to sparse.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/set_default_backend.py to set_default_backend.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/network.py to network.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/edge_softmax.py to edge_softmax.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/sddmm.py to sddmm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/gather_mm.py to gather_mm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/segment.py to segment.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/spmm.py to spmm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/logging.py to logging.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/graph_index.py to graph_index.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/nccl.py to nccl.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/__init__.py to __init__.cpython-37.pyc 
byte-compiling build/bdist.linux-x86_64/egg/dgl/udf.py to udf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/pytorch_tensor.py to pytorch_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/tensor.py to tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/numpy.py to numpy.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/init.py to init.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/view.py to view.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/neighbor.py to neighbor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/pinsage.py to pinsage.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/negative.py to negative.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/randomwalks.py to randomwalks.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/node2vec_randomwalk.py to node2vec_randomwalk.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/generators.py to generators.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm7b.py to qm7b.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/graph_serialize.py to graph_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/dgl_dataset.py to dgl_dataset.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset_base.py to csv_dataset_base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9_edge.py to qm9_edge.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gnn_benchmark.py to gnn_benchmark.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9.py to qm9.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/icews18.py to icews18.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gdelt.py to gdelt.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/sbm.py to sbm.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset.py to csv_dataset.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/karate.py to karate.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/citation_graph.py to citation_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/rdf.py to rdf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/bitcoinotc.py to bitcoinotc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/ppi.py to ppi.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tensor_serialize.py to tensor_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/flickr.py to flickr.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/wikics.py to wikics.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/data/heterograph_serialize.py to heterograph_serialize.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tu.py to tu.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gindt.py to gindt.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tree.py to tree.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/synthetic.py to synthetic.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/adapter.py to adapter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fakenews.py to fakenews.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fraud.py to fraud.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/minigc.py to minigc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/yelp.py to yelp.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/reddit.py to reddit.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/convert.py to convert.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/container.py to container.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/merge.py to merge.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/readout.py to readout.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_graph.py to dist_graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_partition_book.py to graph_partition_book.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/partition.py to partition.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/sparse_emb.py to sparse_emb.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/utils.py to utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/standalone_kvstore.py to standalone_kvstore.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_services.py to graph_services.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/role.py to role.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/constants.py to constants.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_server.py to 
rpc_server.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/kvstore.py to kvstore.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_context.py to dist_context.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc.py to rpc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_client.py to rpc_client.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/id_map.py to id_map.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/server_state.py to server_state.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/shared_mem_utils.py to shared_mem_utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_tensor.py to dist_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_dataloader.py to dist_dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/nodeflow.py to nodeflow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/udf.py to udf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/view.py to view.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/kernel.py to kernel.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/spmv.py to spmv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/degree_bucketing.py to degree_bucketing.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/scheduler.py to scheduler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/adapter.py to adapter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/runtime.py to runtime.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/program.py to program.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/executor.py to executor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/var.py to var.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/registry.py to registry.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/graph.py to graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/frame.py to frame.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/propagate.py to propagate.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/batch.py to batch.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/functional.py to functional.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/module.py to module.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/libra_partition.py to libra_partition.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/__init__.py to 
__init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/tools.py to tools.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/runtime_ctypes.py to runtime_ctypes.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/function.py to function.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/libinfo.py to libinfo.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/streams.py to streams.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/function.py to function.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/types.py to types.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/object.py to object.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object_generic.py to object_generic.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object.py to object.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/frame.py to frame.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/random.py to random.cpython-37.pyc
creating stub loader for dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/core.py to core.cpython-37.pyc
installing package data to build/bdist.linux-x86_64/egg
running install_data
copying ../build/libdgl.so -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/tensoradapter
creating build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch
copying ../build/tensoradapter/pytorch/libtensoradapter_pytorch_1.9.0.so -> build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch
creating build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/not-zip-safe -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
writing build/bdist.linux-x86_64/egg/EGG-INFO/native_libs.txt
creating dist
creating 'dist/dgl-0.9-py3.7-linux-x86_64.egg' and adding 'build/bdist.linux-x86_64/egg' to it
removing 'build/bdist.linux-x86_64/egg' (and everything under it)
Processing dgl-0.9-py3.7-linux-x86_64.egg
creating /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg
Extracting dgl-0.9-py3.7-linux-x86_64.egg to /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages
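[Editor's note] At this point the egg has been extracted into site-packages; the easy-install.pth entry added in the next lines is what actually puts it on sys.path. A minimal sanity check for the result, run in the same mxnet-ci environment — a sketch only, with the expected values taken from the log above:

    # Verify the freshly installed egg resolves from sys.path.
    import pkg_resources

    dist = pkg_resources.get_distribution("dgl")
    print(dist.version)   # expected: 0.9, per the log above
    print(dist.location)  # .../site-packages/dgl-0.9-py3.7-linux-x86_64.egg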
Adding dgl 0.9 to easy-install.pth file
Installed /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg
Processing dependencies for dgl==0.9
Searching for psutil>=5.8.0
Reading https://pypi.org/simple/psutil/
Downloading https://files.pythonhosted.org/packages/3d/73/d8c87b5612c58d1e6c6d91997c1590771d34e4ee27d9c11eb1e64ecbf365/psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=4fb54941aac044a61db9d8eb56fc5bee207db3bc58645d657249030e15ba3727
Best match: psutil 5.9.2
Processing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl
Installing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl to /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages
Adding psutil 5.9.2 to easy-install.pth file
Installed /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/psutil-5.9.2-py3.7-linux-x86_64.egg
Searching for tqdm==4.64.0
Best match: tqdm 4.64.0
Adding tqdm 4.64.0 to easy-install.pth file
Installing tqdm script to /opt/conda/envs/mxnet-ci/bin
Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages
Searching for requests==2.28.1
Best match: requests 2.28.1
Adding requests 2.28.1 to easy-install.pth file
Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages
Searching for networkx==2.6.3
Best match: networkx 2.6.3
Adding networkx 2.6.3 to easy-install.pth file
Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages
Searching for scipy==1.7.3
Best match: scipy 1.7.3
Adding scipy 1.7.3 to easy-install.pth file
Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages
Searching for numpy==1.21.6
Best match: numpy 1.21.6
Adding numpy 1.21.6 to easy-install.pth file
Installing f2py script to /opt/conda/envs/mxnet-ci/bin
Installing f2py3 script to /opt/conda/envs/mxnet-ci/bin
Installing f2py3.7 script to /opt/conda/envs/mxnet-ci/bin
Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages
Searching for certifi==2022.6.15
Best match: certifi 2022.6.15
Adding certifi 2022.6.15 to easy-install.pth file
Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages
Searching for urllib3==1.26.11
Best match: urllib3 1.26.11
Adding urllib3 1.26.11 to easy-install.pth file
Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages
Searching for idna==3.3
Best match: idna 3.3
Adding idna 3.3 to easy-install.pth file
Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages
Searching for charset-normalizer==2.1.0
Best match: charset-normalizer 2.1.0
Adding charset-normalizer 2.1.0 to easy-install.pth file
Installing normalizer script to /opt/conda/envs/mxnet-ci/bin
Using /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages
Finished processing dependencies for dgl==0.9
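[Editor's note] easy_install fetched psutil>=5.8.0 from PyPI and found the remaining requirements already satisfied in the environment (the "Searching for X==N ... Using .../site-packages" pairs are pins against versions already installed). A quick cross-check of that dependency set — the versions below are copied from this log, not from DGL's setup.py:

    # Spot-check that each runtime dependency imports at the resolved version.
    import importlib

    expected = {"psutil": "5.9.2", "tqdm": "4.64.0", "requests": "2.28.1",
                "networkx": "2.6.3", "scipy": "1.7.3", "numpy": "1.21.6"}
    for name, version in expected.items():
        mod = importlib.import_module(name)
        assert mod.__version__ == version, (name, mod.__version__)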
/opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/_ffi/_cython/core.pyx
  tree = Parsing.p_module(s, pxd, full_module_name)
/opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/setuptools/command/install.py:37: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.
  setuptools.SetuptoolsDeprecationWarning,
/opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/setuptools/command/easy_install.py:147: EasyInstallDeprecationWarning: easy_install command is deprecated. Use build and pip and other standards-based tools.
  EasyInstallDeprecationWarning,
/opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: is an invalid version and will not be supported in a future release
  PkgResourcesDeprecationWarning,
[1/1] Cythonizing dgl/_ffi/_cython/core.pyx
running build_ext
building 'dgl._ffi._cy3.core' extension
gcc -pthread -B /opt/conda/envs/mxnet-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/mxnet-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o
cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++
g++ -pthread -B /opt/conda/envs/mxnet-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/mxnet-ci/compiler_compat -L/opt/conda/envs/mxnet-ci/lib -Wl,-rpath=/opt/conda/envs/mxnet-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/mxnet-ci/compiler_compat/ld: warning: /opt/conda/envs/mxnet-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so -> dgl/_ffi/_cy3
/opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/_ffi/_cython/core.pyx
  tree = Parsing.p_module(s, pxd, full_module_name)
WARNING: Skipping dgl as it is not installed.
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
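[Editor's note] The gcc/g++ pair above compiles the Cython-generated core.cpp against the DGL and third-party headers, then links it into the dgl._ffi._cy3.core module with -ldgl. Roughly the following setuptools Extension drives that step — a simplified sketch mirroring the -I/-L flags in the log; the real configuration lives in DGL's python/setup.py and may differ. Setting language_level explicitly is also what would silence the repeated Cython FutureWarning:

    # Sketch of the extension build behind "building 'dgl._ffi._cy3.core' extension".
    from setuptools import setup, Extension
    from Cython.Build import cythonize

    ext = Extension(
        "dgl._ffi._cy3.core",
        sources=["dgl/_ffi/_cython/core.pyx"],
        include_dirs=["../include", "../third_party/dmlc-core/include",
                      "../third_party/dlpack/include"],       # the -I flags above
        library_dirs=["dgl", "../build/Release", "../build"],  # the -L flags above
        libraries=["dgl"],                                     # emits -ldgl
        language="c++",
    )
    setup(ext_modules=cythonize([ext], language_level=3))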
[1/1] Cythonizing dgl/_ffi/_cython/core.pyx
running install
running bdist_egg
running egg_info
creating dgl.egg-info
writing dgl.egg-info/PKG-INFO
writing dependency_links to dgl.egg-info/dependency_links.txt
writing requirements to dgl.egg-info/requires.txt
writing top-level names to dgl.egg-info/top_level.txt
writing manifest file 'dgl.egg-info/SOURCES.txt'
reading manifest file 'dgl.egg-info/SOURCES.txt'
writing manifest file 'dgl.egg-info/SOURCES.txt'
installing library code to build/bdist.linux-x86_64/egg
running install_lib
running build_py
creating build creating build/lib.linux-x86_64-3.7 creating build/lib.linux-x86_64-3.7/dgl copying dgl/partition.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/core.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/subgraph.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/traversal.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/base.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/heterograph_index.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/sparse.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/global_config.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/_api_internal.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/heterograph.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/network.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/logging.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/graph_index.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/udf.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/ndarray.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/init.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/view.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/generators.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/convert.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/container.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/merge.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/readout.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/__init__.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/propagate.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/batch.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/frame.py -> build/lib.linux-x86_64-3.7/dgl copying dgl/random.py -> build/lib.linux-x86_64-3.7/dgl creating build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/sp_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/diag_matrix.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/reduction.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/__init__.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse copying dgl/mock_sparse/elementwise_op_sp.py -> build/lib.linux-x86_64-3.7/dgl/mock_sparse creating build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/edge_coarsening.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/fps.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/capi.py -> build/lib.linux-x86_64-3.7/dgl/geometry copying dgl/geometry/__init__.py -> build/lib.linux-x86_64-3.7/dgl/geometry creating build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/base.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/message.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/reducer.py -> build/lib.linux-x86_64-3.7/dgl/function copying dgl/function/__init__.py -> build/lib.linux-x86_64-3.7/dgl/function creating build/lib.linux-x86_64-3.7/dgl/nn copying dgl/nn/__init__.py -> 
build/lib.linux-x86_64-3.7/dgl/nn creating build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/dis_kvstore.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/unified_tensor.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/graph_store.py -> build/lib.linux-x86_64-3.7/dgl/contrib copying dgl/contrib/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib creating build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/checks.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/exception.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/pin_memory.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/internal.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/filter.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/data.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/__init__.py -> build/lib.linux-x86_64-3.7/dgl/utils copying dgl/utils/shared_mem.py -> build/lib.linux-x86_64-3.7/dgl/utils creating build/lib.linux-x86_64-3.7/dgl/multiprocessing copying dgl/multiprocessing/pytorch.py -> build/lib.linux-x86_64-3.7/dgl/multiprocessing copying dgl/multiprocessing/__init__.py -> build/lib.linux-x86_64-3.7/dgl/multiprocessing creating build/lib.linux-x86_64-3.7/dgl/optim copying dgl/optim/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim creating build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/base.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/graphsaint.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/shadow.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/negative_sampler.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/__init__.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/cluster_gcn.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/neighbor_sampler.py -> build/lib.linux-x86_64-3.7/dgl/dataloading copying dgl/dataloading/dist_dataloader.py -> build/lib.linux-x86_64-3.7/dgl/dataloading creating build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/neighbor.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/shadow.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/negative_sampler.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading copying dgl/_dataloading/cluster_gcn.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading creating build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/backend.py -> build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend copying dgl/backend/set_default_backend.py -> build/lib.linux-x86_64-3.7/dgl/backend creating build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/edge_softmax.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/sddmm.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/gather_mm.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/segment.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/__init__.py -> build/lib.linux-x86_64-3.7/dgl/ops copying dgl/ops/spmm.py -> build/lib.linux-x86_64-3.7/dgl/ops creating build/lib.linux-x86_64-3.7/dgl/cuda copying dgl/cuda/nccl.py -> build/lib.linux-x86_64-3.7/dgl/cuda 
copying dgl/cuda/__init__.py -> build/lib.linux-x86_64-3.7/dgl/cuda creating build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/pytorch_tensor.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/base.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/tensor.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/__init__.py -> build/lib.linux-x86_64-3.7/dgl/storages copying dgl/storages/numpy.py -> build/lib.linux-x86_64-3.7/dgl/storages creating build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/neighbor.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/pinsage.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/negative.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/randomwalks.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/utils.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/node2vec_randomwalk.py -> build/lib.linux-x86_64-3.7/dgl/sampling copying dgl/sampling/__init__.py -> build/lib.linux-x86_64-3.7/dgl/sampling creating build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm7b.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/graph_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/dgl_dataset.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/csv_dataset_base.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/knowledge_graph.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm9_edge.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gnn_benchmark.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/qm9.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/icews18.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gdelt.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/utils.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/sbm.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/csv_dataset.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/karate.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/citation_graph.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/rdf.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/bitcoinotc.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/ppi.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tensor_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/flickr.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/wikics.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/heterograph_serialize.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tu.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/__init__.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/gindt.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/tree.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/synthetic.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/adapter.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/fakenews.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/fraud.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/minigc.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/yelp.py -> build/lib.linux-x86_64-3.7/dgl/data copying dgl/data/reddit.py -> build/lib.linux-x86_64-3.7/dgl/data creating build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_graph.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying 
dgl/distributed/graph_partition_book.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/partition.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/standalone_kvstore.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/graph_services.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/role.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/constants.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc_server.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/kvstore.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_context.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/rpc_client.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/id_map.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/server_state.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/shared_mem_utils.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_tensor.py -> build/lib.linux-x86_64-3.7/dgl/distributed copying dgl/distributed/dist_dataloader.py -> build/lib.linux-x86_64-3.7/dgl/distributed creating build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/nodeflow.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/udf.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/view.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/kernel.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/graph.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate copying dgl/_deprecate/frame.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate creating build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/functional.py -> build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/__init__.py -> build/lib.linux-x86_64-3.7/dgl/transforms copying dgl/transforms/module.py -> build/lib.linux-x86_64-3.7/dgl/transforms creating build/lib.linux-x86_64-3.7/dgl/distgnn copying dgl/distgnn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn creating build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/runtime_ctypes.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/function.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/libinfo.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/streams.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/base.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/ndarray.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/object_generic.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi copying dgl/_ffi/object.py -> build/lib.linux-x86_64-3.7/dgl/_ffi creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/linear.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying 
dgl/nn/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/factory.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch copying dgl/nn/pytorch/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet copying dgl/nn/mxnet/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/glob.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/hetero.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/utils.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow copying dgl/nn/tensorflow/softmax.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow creating build/lib.linux-x86_64-3.7/dgl/nn/functional copying dgl/nn/functional/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/functional creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/transe.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/edgepred.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/transr.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/agnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/dgnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/grouprevres.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/twirlsconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/egatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gcn2conv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/dotgatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/cfconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gmmconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densesageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gineconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/nnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv copying 
dgl/nn/pytorch/conv/gatv2conv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/hgtconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/pnaconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/gatedgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/atomicconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/tagconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/densegraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/egnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
copying dgl/nn/pytorch/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv
creating build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain
copying dgl/nn/pytorch/explain/gnnexplainer.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain
copying dgl/nn/pytorch/explain/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain
creating build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/agnnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/gmmconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/densesageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/nnconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/gatedgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/tagconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/densegraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
copying dgl/nn/mxnet/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv
creating build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/relgraphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/appnpconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/graphconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/densechebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/ginconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/sageconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/edgeconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/__init__.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/chebconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/sgconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
copying dgl/nn/tensorflow/conv/gatconv.py -> build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv
creating build/lib.linux-x86_64-3.7/dgl/contrib/sampling
copying dgl/contrib/sampling/sampler.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling
copying dgl/contrib/sampling/dis_sampler.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling
copying dgl/contrib/sampling/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib/sampling
creating build/lib.linux-x86_64-3.7/dgl/contrib/data
copying dgl/contrib/data/knowledge_graph.py -> build/lib.linux-x86_64-3.7/dgl/contrib/data
copying dgl/contrib/data/__init__.py -> build/lib.linux-x86_64-3.7/dgl/contrib/data
creating build/lib.linux-x86_64-3.7/dgl/optim/pytorch
copying dgl/optim/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/pytorch
copying dgl/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/optim/pytorch
creating build/lib.linux-x86_64-3.7/dgl/optim/mxnet
copying dgl/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/mxnet
creating build/lib.linux-x86_64-3.7/dgl/optim/tensorflow
copying dgl/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/optim/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch
copying dgl/_dataloading/pytorch/dataloader.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch
copying dgl/_dataloading/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch
creating build/lib.linux-x86_64-3.7/dgl/backend/pytorch
copying dgl/backend/pytorch/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch
copying dgl/backend/pytorch/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch
copying dgl/backend/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/pytorch
creating build/lib.linux-x86_64-3.7/dgl/backend/mxnet
copying dgl/backend/mxnet/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet
copying dgl/backend/mxnet/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet
copying dgl/backend/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet
copying dgl/backend/mxnet/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/backend/mxnet
creating build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
copying dgl/backend/tensorflow/sparse.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
copying dgl/backend/tensorflow/tensor.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
copying dgl/backend/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
copying dgl/backend/tensorflow/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/backend/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn
copying dgl/distributed/nn/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim
copying dgl/distributed/optim/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch
copying dgl/distributed/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch
copying dgl/distributed/nn/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet
copying dgl/distributed/nn/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet
creating build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow
copying dgl/distributed/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch
copying dgl/distributed/optim/pytorch/utils.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch
copying dgl/distributed/optim/pytorch/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch
copying dgl/distributed/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet
copying dgl/distributed/optim/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet
creating build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow
copying dgl/distributed/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow
creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/spmv.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/degree_bucketing.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/scheduler.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/adapter.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
copying dgl/_deprecate/runtime/runtime.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime
creating build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
copying dgl/_deprecate/runtime/ir/program.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
copying dgl/_deprecate/runtime/ir/executor.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
copying dgl/_deprecate/runtime/ir/var.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
copying dgl/_deprecate/runtime/ir/registry.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
copying dgl/_deprecate/runtime/ir/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir
creating build/lib.linux-x86_64-3.7/dgl/distgnn/partition
copying dgl/distgnn/partition/libra_partition.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/partition
copying dgl/distgnn/partition/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/partition
creating build/lib.linux-x86_64-3.7/dgl/distgnn/tools
copying dgl/distgnn/tools/__init__.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/tools
copying dgl/distgnn/tools/tools.py -> build/lib.linux-x86_64-3.7/dgl/distgnn/tools
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
copying dgl/_ffi/_ctypes/function.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
copying dgl/_ffi/_ctypes/types.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
copying dgl/_ffi/_ctypes/ndarray.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
copying dgl/_ffi/_ctypes/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
copying dgl/_ffi/_ctypes/object.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3
copying dgl/_ffi/_cy3/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2
copying dgl/_ffi/_cy2/__init__.py -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2
creating build/lib.linux-x86_64-3.7/dgl/_ffi/_cython
copying dgl/_ffi/_cython/core.cpp -> build/lib.linux-x86_64-3.7/dgl/_ffi/_cython
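Everything up to this point is setuptools' build_py phase: every pure-Python module discovered under dgl/ is copied into the staging tree build/lib.linux-x86_64-3.7. A minimal sketch of the kind of setup() configuration that drives such a copy run is below; the names and version are illustrative, not DGL's actual setup.py.

    # Sketch of a build_py configuration (illustrative; not DGL's actual setup.py).
    from setuptools import setup, find_packages

    setup(
        name="dgl",
        version="0.0.0",  # placeholder version
        packages=find_packages(),  # discovers dgl, dgl.nn.pytorch.conv, dgl.backend, ...
        # Non-.py files, such as the checked-in Cython output, must be listed explicitly:
        package_data={"dgl._ffi._cython": ["core.cpp"]},
    )

Running "python setup.py build" (or bdist_egg, which implies it) invokes build_py and emits copy lines like the ones above, one per module.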
running build_ext
building 'dgl._ffi._cy3.core' extension
creating build/temp.linux-x86_64-3.7
creating build/temp.linux-x86_64-3.7/dgl
creating build/temp.linux-x86_64-3.7/dgl/_ffi
creating build/temp.linux-x86_64-3.7/dgl/_ffi/_cython
gcc -pthread -B /opt/conda/envs/tensorflow-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/tensorflow-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o
cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++
g++ -pthread -B /opt/conda/envs/tensorflow-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/tensorflow-ci/compiler_compat -L/opt/conda/envs/tensorflow-ci/lib -Wl,-rpath=/opt/conda/envs/tensorflow-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so
/opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
/opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001
/opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002
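The build_ext phase compiles the pre-generated Cython translation unit dgl/_ffi/_cython/core.cpp into the extension module dgl._ffi._cy3.core and links it against the previously built native library (note -Ldgl -L../build/Release -L../build -ldgl). The cc1plus warning is benign: distutils reuses its default C flags, including -Wstrict-prototypes, even for C++ sources. The ld GNU_PROPERTY_TYPE warnings typically come from Anaconda's compiler_compat linker being older than the libstdc++/libgcc it inspects, and are harmless here. A sketch of an Extension entry consistent with the gcc/g++ invocations above, reconstructed from the flags (the real setup.py may differ):

    # Reconstructed from the compile/link flags in the log; not DGL's actual setup.py.
    from setuptools import setup, Extension

    setup(
        name="dgl",
        ext_modules=[
            Extension(
                "dgl._ffi._cy3.core",
                sources=["dgl/_ffi/_cython/core.cpp"],  # Cython output compiled as C++
                include_dirs=[
                    "../include/",
                    "../third_party/dmlc-core/include",
                    "../third_party/dlpack/include",
                ],
                library_dirs=["dgl", "../build/Release", "../build"],
                libraries=["dgl"],  # emits -ldgl at link time
                language="c++",
            )
        ],
    )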
creating build/bdist.linux-x86_64
creating build/bdist.linux-x86_64/egg
creating build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/sp_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/diag_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/reduction.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/__init__.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
copying build/lib.linux-x86_64-3.7/dgl/mock_sparse/elementwise_op_sp.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse
creating build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/edge_coarsening.py -> build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/fps.py -> build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/capi.py -> build/bdist.linux-x86_64/egg/dgl/geometry
copying build/lib.linux-x86_64-3.7/dgl/geometry/__init__.py -> build/bdist.linux-x86_64/egg/dgl/geometry
creating build/bdist.linux-x86_64/egg/dgl/function
copying build/lib.linux-x86_64-3.7/dgl/function/base.py -> build/bdist.linux-x86_64/egg/dgl/function
copying build/lib.linux-x86_64-3.7/dgl/function/message.py -> build/bdist.linux-x86_64/egg/dgl/function
copying build/lib.linux-x86_64-3.7/dgl/function/reducer.py -> build/bdist.linux-x86_64/egg/dgl/function
copying build/lib.linux-x86_64-3.7/dgl/function/__init__.py -> build/bdist.linux-x86_64/egg/dgl/function
copying build/lib.linux-x86_64-3.7/dgl/partition.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/nn
creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/transe.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/edgepred.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/transr.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/link/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/sparse_emb.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/linear.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/agnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/dgnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/grouprevres.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/twirlsconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/egatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gcn2conv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/dotgatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/cfconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gmmconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densesageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gineconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/nnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatv2conv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/hgtconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/pnaconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatedgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/atomicconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/tagconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/densegraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/egnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain/gnnexplainer.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/explain/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/factory.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
copying build/lib.linux-x86_64-3.7/dgl/nn/pytorch/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch
creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet
creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/agnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gmmconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densesageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/nnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gatedgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/tagconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/densegraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet
copying build/lib.linux-x86_64-3.7/dgl/nn/mxnet/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet
copying build/lib.linux-x86_64-3.7/dgl/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn
creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow
creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/nn/tensorflow/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow
creating build/bdist.linux-x86_64/egg/dgl/nn/functional
copying build/lib.linux-x86_64-3.7/dgl/nn/functional/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/functional
copying build/lib.linux-x86_64-3.7/dgl/core.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/subgraph.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/contrib
copying build/lib.linux-x86_64-3.7/dgl/contrib/dis_kvstore.py -> build/bdist.linux-x86_64/egg/dgl/contrib
copying build/lib.linux-x86_64-3.7/dgl/contrib/unified_tensor.py -> build/bdist.linux-x86_64/egg/dgl/contrib
copying build/lib.linux-x86_64-3.7/dgl/contrib/graph_store.py -> build/bdist.linux-x86_64/egg/dgl/contrib
creating build/bdist.linux-x86_64/egg/dgl/contrib/sampling
copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/sampler.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling
copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/dis_sampler.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling
copying build/lib.linux-x86_64-3.7/dgl/contrib/sampling/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling
creating build/bdist.linux-x86_64/egg/dgl/contrib/data
copying build/lib.linux-x86_64-3.7/dgl/contrib/data/knowledge_graph.py -> build/bdist.linux-x86_64/egg/dgl/contrib/data
copying build/lib.linux-x86_64-3.7/dgl/contrib/data/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib/data
copying build/lib.linux-x86_64-3.7/dgl/contrib/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib
copying build/lib.linux-x86_64-3.7/dgl/traversal.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/utils
copying build/lib.linux-x86_64-3.7/dgl/utils/checks.py -> build/bdist.linux-x86_64/egg/dgl/utils
copying build/lib.linux-x86_64-3.7/dgl/utils/exception.py -> build/bdist.linux-x86_64/egg/dgl/utils
copying build/lib.linux-x86_64-3.7/dgl/utils/pin_memory.py -> build/bdist.linux-x86_64/egg/dgl/utils
copying build/lib.linux-x86_64-3.7/dgl/utils/internal.py -> build/bdist.linux-x86_64/egg/dgl/utils
copying build/lib.linux-x86_64-3.7/dgl/utils/filter.py -> build/bdist.linux-x86_64/egg/dgl/utils
copying build/lib.linux-x86_64-3.7/dgl/utils/data.py -> build/bdist.linux-x86_64/egg/dgl/utils
copying build/lib.linux-x86_64-3.7/dgl/utils/__init__.py -> build/bdist.linux-x86_64/egg/dgl/utils
copying build/lib.linux-x86_64-3.7/dgl/utils/shared_mem.py -> build/bdist.linux-x86_64/egg/dgl/utils
creating build/bdist.linux-x86_64/egg/dgl/multiprocessing
copying build/lib.linux-x86_64-3.7/dgl/multiprocessing/pytorch.py -> build/bdist.linux-x86_64/egg/dgl/multiprocessing
copying build/lib.linux-x86_64-3.7/dgl/multiprocessing/__init__.py -> build/bdist.linux-x86_64/egg/dgl/multiprocessing
copying build/lib.linux-x86_64-3.7/dgl/base.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/optim
creating build/bdist.linux-x86_64/egg/dgl/optim/pytorch
copying build/lib.linux-x86_64-3.7/dgl/optim/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/pytorch
copying build/lib.linux-x86_64-3.7/dgl/optim/pytorch/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/optim/pytorch
creating build/bdist.linux-x86_64/egg/dgl/optim/mxnet
copying build/lib.linux-x86_64-3.7/dgl/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/mxnet
copying build/lib.linux-x86_64-3.7/dgl/optim/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim
creating build/bdist.linux-x86_64/egg/dgl/optim/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/heterograph_index.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/sparse.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/global_config.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/dataloading
copying build/lib.linux-x86_64-3.7/dgl/dataloading/base.py -> build/bdist.linux-x86_64/egg/dgl/dataloading
copying build/lib.linux-x86_64-3.7/dgl/dataloading/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/dataloading
copying build/lib.linux-x86_64-3.7/dgl/dataloading/graphsaint.py -> build/bdist.linux-x86_64/egg/dgl/dataloading
copying build/lib.linux-x86_64-3.7/dgl/dataloading/shadow.py -> build/bdist.linux-x86_64/egg/dgl/dataloading
copying build/lib.linux-x86_64-3.7/dgl/dataloading/negative_sampler.py -> build/bdist.linux-x86_64/egg/dgl/dataloading
copying build/lib.linux-x86_64-3.7/dgl/dataloading/__init__.py -> build/bdist.linux-x86_64/egg/dgl/dataloading
copying build/lib.linux-x86_64-3.7/dgl/dataloading/cluster_gcn.py -> build/bdist.linux-x86_64/egg/dgl/dataloading
copying build/lib.linux-x86_64-3.7/dgl/dataloading/neighbor_sampler.py -> build/bdist.linux-x86_64/egg/dgl/dataloading
copying build/lib.linux-x86_64-3.7/dgl/dataloading/dist_dataloader.py -> build/bdist.linux-x86_64/egg/dgl/dataloading
creating build/bdist.linux-x86_64/egg/dgl/_dataloading
copying build/lib.linux-x86_64-3.7/dgl/_dataloading/neighbor.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading
copying build/lib.linux-x86_64-3.7/dgl/_dataloading/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading
creating build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch
copying build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch
copying build/lib.linux-x86_64-3.7/dgl/_dataloading/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch
copying build/lib.linux-x86_64-3.7/dgl/_dataloading/shadow.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading
copying build/lib.linux-x86_64-3.7/dgl/_dataloading/negative_sampler.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading
copying build/lib.linux-x86_64-3.7/dgl/_dataloading/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading
copying build/lib.linux-x86_64-3.7/dgl/_dataloading/cluster_gcn.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading
copying build/lib.linux-x86_64-3.7/dgl/_api_internal.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/heterograph.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/backend
copying build/lib.linux-x86_64-3.7/dgl/backend/backend.py -> build/bdist.linux-x86_64/egg/dgl/backend
creating build/bdist.linux-x86_64/egg/dgl/backend/pytorch
copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch
copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch
copying build/lib.linux-x86_64-3.7/dgl/backend/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch
creating build/bdist.linux-x86_64/egg/dgl/backend/mxnet
copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet
copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet
copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet
copying build/lib.linux-x86_64-3.7/dgl/backend/mxnet/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet
copying build/lib.linux-x86_64-3.7/dgl/backend/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend
creating build/bdist.linux-x86_64/egg/dgl/backend/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/backend/tensorflow/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/backend/set_default_backend.py -> build/bdist.linux-x86_64/egg/dgl/backend
copying build/lib.linux-x86_64-3.7/dgl/network.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/ops
copying build/lib.linux-x86_64-3.7/dgl/ops/edge_softmax.py -> build/bdist.linux-x86_64/egg/dgl/ops
copying build/lib.linux-x86_64-3.7/dgl/ops/sddmm.py -> build/bdist.linux-x86_64/egg/dgl/ops
copying build/lib.linux-x86_64-3.7/dgl/ops/gather_mm.py -> build/bdist.linux-x86_64/egg/dgl/ops
copying build/lib.linux-x86_64-3.7/dgl/ops/segment.py -> build/bdist.linux-x86_64/egg/dgl/ops
copying build/lib.linux-x86_64-3.7/dgl/ops/__init__.py -> build/bdist.linux-x86_64/egg/dgl/ops
copying build/lib.linux-x86_64-3.7/dgl/ops/spmm.py -> build/bdist.linux-x86_64/egg/dgl/ops
copying build/lib.linux-x86_64-3.7/dgl/logging.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/graph_index.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/cuda
copying build/lib.linux-x86_64-3.7/dgl/cuda/nccl.py -> build/bdist.linux-x86_64/egg/dgl/cuda
copying build/lib.linux-x86_64-3.7/dgl/cuda/__init__.py -> build/bdist.linux-x86_64/egg/dgl/cuda
copying build/lib.linux-x86_64-3.7/dgl/udf.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/storages
copying build/lib.linux-x86_64-3.7/dgl/storages/pytorch_tensor.py -> build/bdist.linux-x86_64/egg/dgl/storages
copying build/lib.linux-x86_64-3.7/dgl/storages/base.py -> build/bdist.linux-x86_64/egg/dgl/storages
copying build/lib.linux-x86_64-3.7/dgl/storages/tensor.py -> build/bdist.linux-x86_64/egg/dgl/storages
copying build/lib.linux-x86_64-3.7/dgl/storages/__init__.py -> build/bdist.linux-x86_64/egg/dgl/storages
copying build/lib.linux-x86_64-3.7/dgl/storages/numpy.py -> build/bdist.linux-x86_64/egg/dgl/storages
copying build/lib.linux-x86_64-3.7/dgl/ndarray.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/init.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/view.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/sampling
copying build/lib.linux-x86_64-3.7/dgl/sampling/neighbor.py -> build/bdist.linux-x86_64/egg/dgl/sampling
copying build/lib.linux-x86_64-3.7/dgl/sampling/pinsage.py -> build/bdist.linux-x86_64/egg/dgl/sampling
copying build/lib.linux-x86_64-3.7/dgl/sampling/negative.py -> build/bdist.linux-x86_64/egg/dgl/sampling
copying build/lib.linux-x86_64-3.7/dgl/sampling/randomwalks.py -> build/bdist.linux-x86_64/egg/dgl/sampling
copying build/lib.linux-x86_64-3.7/dgl/sampling/utils.py -> build/bdist.linux-x86_64/egg/dgl/sampling
copying build/lib.linux-x86_64-3.7/dgl/sampling/node2vec_randomwalk.py -> build/bdist.linux-x86_64/egg/dgl/sampling
copying build/lib.linux-x86_64-3.7/dgl/sampling/__init__.py -> build/bdist.linux-x86_64/egg/dgl/sampling
copying build/lib.linux-x86_64-3.7/dgl/generators.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/qm7b.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/graph_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/dgl_dataset.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/csv_dataset_base.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/knowledge_graph.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/qm9_edge.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/gnn_benchmark.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/qm9.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/icews18.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/gdelt.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/utils.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/sbm.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/csv_dataset.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/karate.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/citation_graph.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/rdf.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/bitcoinotc.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/ppi.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/tensor_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/flickr.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/wikics.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/heterograph_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/tu.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/__init__.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/gindt.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/tree.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/synthetic.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/adapter.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/fakenews.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/fraud.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/minigc.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/yelp.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/data/reddit.py -> build/bdist.linux-x86_64/egg/dgl/data
copying build/lib.linux-x86_64-3.7/dgl/convert.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/container.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/merge.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/readout.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/__init__.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_graph.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/graph_partition_book.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/partition.py -> build/bdist.linux-x86_64/egg/dgl/distributed
creating build/bdist.linux-x86_64/egg/dgl/distributed/nn
creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch
copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch/sparse_emb.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch
copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch
creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet
copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet
copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn
creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/distributed/nn/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow
creating build/bdist.linux-x86_64/egg/dgl/distributed/optim
creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch
copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/utils.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch
copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch
copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/pytorch/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch
creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet
copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet
copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim
creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/distributed/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow
copying build/lib.linux-x86_64-3.7/dgl/distributed/standalone_kvstore.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/graph_services.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/role.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/constants.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/rpc_server.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/kvstore.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_context.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/rpc.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/rpc_client.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/id_map.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/server_state.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/shared_mem_utils.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_tensor.py -> build/bdist.linux-x86_64/egg/dgl/distributed
copying build/lib.linux-x86_64-3.7/dgl/distributed/dist_dataloader.py -> build/bdist.linux-x86_64/egg/dgl/distributed
creating build/bdist.linux-x86_64/egg/dgl/_deprecate
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/nodeflow.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/udf.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/view.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/kernel.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate
creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/spmv.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/degree_bucketing.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/scheduler.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/adapter.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/runtime.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime
creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/program.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/executor.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/var.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/registry.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/runtime/ir/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/graph.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate
copying build/lib.linux-x86_64-3.7/dgl/_deprecate/frame.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate
copying build/lib.linux-x86_64-3.7/dgl/propagate.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/batch.py -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/transforms
copying build/lib.linux-x86_64-3.7/dgl/transforms/functional.py -> build/bdist.linux-x86_64/egg/dgl/transforms
copying build/lib.linux-x86_64-3.7/dgl/transforms/__init__.py -> build/bdist.linux-x86_64/egg/dgl/transforms
copying build/lib.linux-x86_64-3.7/dgl/transforms/module.py -> build/bdist.linux-x86_64/egg/dgl/transforms
creating build/bdist.linux-x86_64/egg/dgl/distgnn
creating build/bdist.linux-x86_64/egg/dgl/distgnn/partition
copying build/lib.linux-x86_64-3.7/dgl/distgnn/partition/libra_partition.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/partition
copying build/lib.linux-x86_64-3.7/dgl/distgnn/partition/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/partition
creating build/bdist.linux-x86_64/egg/dgl/distgnn/tools
copying build/lib.linux-x86_64-3.7/dgl/distgnn/tools/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/tools
copying build/lib.linux-x86_64-3.7/dgl/distgnn/tools/tools.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/tools
copying build/lib.linux-x86_64-3.7/dgl/distgnn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn
creating build/bdist.linux-x86_64/egg/dgl/_ffi
copying build/lib.linux-x86_64-3.7/dgl/_ffi/runtime_ctypes.py -> build/bdist.linux-x86_64/egg/dgl/_ffi
copying build/lib.linux-x86_64-3.7/dgl/_ffi/function.py -> build/bdist.linux-x86_64/egg/dgl/_ffi
copying build/lib.linux-x86_64-3.7/dgl/_ffi/libinfo.py -> build/bdist.linux-x86_64/egg/dgl/_ffi
copying build/lib.linux-x86_64-3.7/dgl/_ffi/streams.py -> build/bdist.linux-x86_64/egg/dgl/_ffi
copying build/lib.linux-x86_64-3.7/dgl/_ffi/base.py -> build/bdist.linux-x86_64/egg/dgl/_ffi
creating build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/function.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/types.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/ndarray.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_ctypes/object.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes
copying build/lib.linux-x86_64-3.7/dgl/_ffi/ndarray.py -> build/bdist.linux-x86_64/egg/dgl/_ffi
copying build/lib.linux-x86_64-3.7/dgl/_ffi/object_generic.py -> build/bdist.linux-x86_64/egg/dgl/_ffi
copying build/lib.linux-x86_64-3.7/dgl/_ffi/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi
copying build/lib.linux-x86_64-3.7/dgl/_ffi/object.py -> build/bdist.linux-x86_64/egg/dgl/_ffi
creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3
creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy2/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2
creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cython
copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cython/core.cpp -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cython
copying build/lib.linux-x86_64-3.7/dgl/frame.py -> build/bdist.linux-x86_64/egg/dgl
copying build/lib.linux-x86_64-3.7/dgl/random.py -> build/bdist.linux-x86_64/egg/dgl
byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/sp_matrix.py to sp_matrix.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/diag_matrix.py to diag_matrix.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/reduction.py to reduction.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/elementwise_op_sp.py to elementwise_op_sp.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/edge_coarsening.py to edge_coarsening.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/fps.py to fps.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/capi.py to capi.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/function/base.py to base.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/function/message.py to message.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/function/reducer.py to reducer.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/function/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/partition.py to partition.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/glob.py to glob.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transe.py to transe.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/edgepred.py to edgepred.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transr.py to transr.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/hetero.py to hetero.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/sparse_emb.py to sparse_emb.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/linear.py to linear.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/utils.py to utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/agnnconv.py to agnnconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/relgraphconv.py to relgraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/appnpconv.py to appnpconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dgnconv.py to dgnconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/grouprevres.py to grouprevres.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/twirlsconv.py to twirlsconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egatconv.py to egatconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/graphconv.py to graphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densechebconv.py to densechebconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gcn2conv.py to gcn2conv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/ginconv.py to ginconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dotgatconv.py to dotgatconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/cfconv.py to cfconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gmmconv.py to gmmconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sageconv.py to sageconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densesageconv.py to densesageconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gineconv.py to gineconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/edgeconv.py to edgeconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/nnconv.py to nnconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatv2conv.py to gatv2conv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/hgtconv.py to hgtconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/pnaconv.py to pnaconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatedgraphconv.py to gatedgraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/atomicconv.py to atomicconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/tagconv.py to tagconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densegraphconv.py to densegraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egnnconv.py to egnnconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/chebconv.py to chebconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sgconv.py to sgconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatconv.py to gatconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/gnnexplainer.py to gnnexplainer.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/factory.py to factory.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/softmax.py to softmax.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/glob.py to glob.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/hetero.py to hetero.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/utils.py to utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/agnnconv.py to agnnconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/relgraphconv.py to relgraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/appnpconv.py to appnpconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/graphconv.py to graphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densechebconv.py to densechebconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/ginconv.py to ginconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gmmconv.py to gmmconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sageconv.py to sageconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densesageconv.py to densesageconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/edgeconv.py to edgeconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/nnconv.py to nnconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatedgraphconv.py to gatedgraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/tagconv.py to tagconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densegraphconv.py to densegraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/chebconv.py to chebconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sgconv.py to sgconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatconv.py to gatconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/softmax.py to softmax.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/glob.py to glob.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/hetero.py to hetero.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/utils.py to utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/relgraphconv.py to relgraphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/appnpconv.py to appnpconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/graphconv.py to graphconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/densechebconv.py to densechebconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/ginconv.py to ginconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sageconv.py to sageconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/edgeconv.py to edgeconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/chebconv.py to chebconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sgconv.py to sgconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/gatconv.py to gatconv.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/softmax.py to softmax.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/functional/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/core.py to core.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/subgraph.py to subgraph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/dis_kvstore.py to dis_kvstore.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/unified_tensor.py to unified_tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/graph_store.py to graph_store.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/sampler.py to sampler.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/dis_sampler.py to dis_sampler.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/traversal.py to traversal.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/checks.py to checks.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/exception.py to exception.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/pin_memory.py to pin_memory.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/internal.py to internal.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/filter.py to filter.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/data.py to data.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/shared_mem.py to shared_mem.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/pytorch.py to pytorch.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/base.py to base.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/mxnet/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/tensorflow/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph_index.py to heterograph_index.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sparse.py to sparse.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/global_config.py to global_config.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/base.py to base.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dataloader.py to dataloader.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/graphsaint.py to graphsaint.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/shadow.py to shadow.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/neighbor_sampler.py to neighbor_sampler.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dist_dataloader.py to dist_dataloader.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/neighbor.py to neighbor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/dataloader.py to dataloader.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/dataloader.py to dataloader.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/shadow.py to shadow.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/negative_sampler.py to negative_sampler.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/cluster_gcn.py to cluster_gcn.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_api_internal.py to _api_internal.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph.py to heterograph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/backend.py to backend.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/sparse.py to sparse.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/tensor.py to tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse.py to sparse.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/tensor.py to tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse_optim.py to sparse_optim.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse.py to sparse.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/tensor.py to tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse_optim.py to sparse_optim.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/set_default_backend.py to set_default_backend.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/network.py to network.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/edge_softmax.py to edge_softmax.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/sddmm.py to sddmm.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/gather_mm.py to gather_mm.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/segment.py to segment.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/spmm.py to spmm.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/logging.py to logging.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/graph_index.py to graph_index.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/nccl.py to nccl.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/udf.py to udf.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/pytorch_tensor.py to pytorch_tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/base.py to base.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/tensor.py to tensor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/numpy.py to numpy.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/ndarray.py to ndarray.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/init.py to init.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/view.py to view.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/neighbor.py to neighbor.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/pinsage.py to pinsage.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/negative.py to negative.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/randomwalks.py to randomwalks.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/utils.py to utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/node2vec_randomwalk.py to node2vec_randomwalk.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/generators.py to generators.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm7b.py to qm7b.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/graph_serialize.py to graph_serialize.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/dgl_dataset.py to dgl_dataset.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset_base.py to csv_dataset_base.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/knowledge_graph.py to knowledge_graph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9_edge.py to qm9_edge.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gnn_benchmark.py to gnn_benchmark.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9.py to qm9.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/icews18.py to icews18.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gdelt.py to gdelt.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/utils.py to utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/sbm.py to sbm.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset.py to csv_dataset.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/karate.py to karate.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/citation_graph.py to citation_graph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/rdf.py to rdf.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/bitcoinotc.py to bitcoinotc.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/ppi.py to ppi.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tensor_serialize.py to tensor_serialize.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/flickr.py to flickr.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/wikics.py to wikics.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/heterograph_serialize.py to heterograph_serialize.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tu.py to tu.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gindt.py to gindt.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tree.py to tree.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/synthetic.py to synthetic.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/adapter.py to adapter.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fakenews.py to fakenews.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fraud.py to fraud.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/minigc.py to minigc.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/yelp.py to yelp.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/data/reddit.py to reddit.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/convert.py to convert.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/container.py to container.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/merge.py to merge.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/readout.py to readout.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_graph.py to dist_graph.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_partition_book.py to graph_partition_book.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/partition.py to partition.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/sparse_emb.py to sparse_emb.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/utils.py to utils.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/sparse_optim.py to sparse_optim.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow/__init__.py to __init__.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/standalone_kvstore.py to standalone_kvstore.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_services.py to graph_services.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/role.py to role.cpython-37.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/constants.py to constants.cpython-37.pyc
byte-compiling
build/bdist.linux-x86_64/egg/dgl/distributed/rpc_server.py to rpc_server.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/kvstore.py to kvstore.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_context.py to dist_context.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc.py to rpc.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_client.py to rpc_client.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/id_map.py to id_map.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/server_state.py to server_state.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/shared_mem_utils.py to shared_mem_utils.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_tensor.py to dist_tensor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_dataloader.py to dist_dataloader.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/nodeflow.py to nodeflow.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/udf.py to udf.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/view.py to view.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/kernel.py to kernel.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/spmv.py to spmv.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/degree_bucketing.py to degree_bucketing.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/scheduler.py to scheduler.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/adapter.py to adapter.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/runtime.py to runtime.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/program.py to program.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/executor.py to executor.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/var.py to var.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/registry.py to registry.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/graph.py to graph.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/frame.py to frame.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/propagate.py to propagate.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/batch.py to batch.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/functional.py to functional.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/module.py to module.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/libra_partition.py to libra_partition.cpython-37.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/distgnn/partition/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/tools.py to tools.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/runtime_ctypes.py to runtime_ctypes.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/function.py to function.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/libinfo.py to libinfo.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/streams.py to streams.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/base.py to base.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/function.py to function.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/types.py to types.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/object.py to object.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/ndarray.py to ndarray.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object_generic.py to object_generic.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object.py to object.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/frame.py to frame.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/random.py to random.cpython-37.pyc creating stub loader for dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/core.py to core.cpython-37.pyc installing package data to build/bdist.linux-x86_64/egg running install_data copying ../build/libdgl.so -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/tensoradapter creating build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch copying ../build/tensoradapter/pytorch/libtensoradapter_pytorch_1.9.0.so -> build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch creating build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/not-zip-safe -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dgl.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO writing build/bdist.linux-x86_64/egg/EGG-INFO/native_libs.txt creating dist creating 'dist/dgl-0.9-py3.7-linux-x86_64.egg' and adding 'build/bdist.linux-x86_64/egg' to it removing 'build/bdist.linux-x86_64/egg' (and everything under it) Processing dgl-0.9-py3.7-linux-x86_64.egg creating /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg Extracting dgl-0.9-py3.7-linux-x86_64.egg to 
/opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Adding dgl 0.9 to easy-install.pth file Installed /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/dgl-0.9-py3.7-linux-x86_64.egg Processing dependencies for dgl==0.9 Searching for psutil>=5.8.0 Reading https://pypi.org/simple/psutil/ Downloading https://files.pythonhosted.org/packages/3d/73/d8c87b5612c58d1e6c6d91997c1590771d34e4ee27d9c11eb1e64ecbf365/psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=4fb54941aac044a61db9d8eb56fc5bee207db3bc58645d657249030e15ba3727 Best match: psutil 5.9.2 Processing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl Installing psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl to /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Adding psutil 5.9.2 to easy-install.pth file Installed /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/psutil-5.9.2-py3.7-linux-x86_64.egg Searching for tqdm==4.64.0 Best match: tqdm 4.64.0 Adding tqdm 4.64.0 to easy-install.pth file Installing tqdm script to /opt/conda/envs/tensorflow-ci/bin Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for requests==2.28.1 Best match: requests 2.28.1 Adding requests 2.28.1 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for networkx==2.6.3 Best match: networkx 2.6.3 Adding networkx 2.6.3 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for scipy==1.4.1 Best match: scipy 1.4.1 Adding scipy 1.4.1 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for numpy==1.18.5 Best match: numpy 1.18.5 Adding numpy 1.18.5 to easy-install.pth file Installing f2py script to /opt/conda/envs/tensorflow-ci/bin Installing f2py3 script to /opt/conda/envs/tensorflow-ci/bin Installing f2py3.7 script to /opt/conda/envs/tensorflow-ci/bin Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for certifi==2022.6.15 Best match: certifi 2022.6.15 Adding certifi 2022.6.15 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for urllib3==1.26.11 Best match: urllib3 1.26.11 Adding urllib3 1.26.11 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for idna==3.3 Best match: idna 3.3 Adding idna 3.3 to easy-install.pth file Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Searching for charset-normalizer==2.1.0 Best match: charset-normalizer 2.1.0 Adding charset-normalizer 2.1.0 to easy-install.pth file Installing normalizer script to /opt/conda/envs/tensorflow-ci/bin Using /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages Finished processing dependencies for dgl==0.9 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/_ffi/_cython/core.pyx tree = Parsing.p_module(s, pxd, full_module_name) /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/command/install.py:37: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools. 
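Note: the install step above goes through the legacy `setup.py install` / easy_install path, which resolves each dependency and pins it into `easy-install.pth`. As a minimal sketch (not part of the build), the resulting environment can be cross-checked with `pkg_resources`; the package names and pins below are copied from the output above and would differ in another environment.

```python
# Sketch: re-check the dependency versions easy_install reported above.
# The pins are copied from this log; adjust them for other environments.
import pkg_resources

expected = {
    "dgl": "0.9",
    "psutil": "5.9.2",
    "tqdm": "4.64.0",
    "requests": "2.28.1",
    "networkx": "2.6.3",
    "scipy": "1.4.1",
    "numpy": "1.18.5",
}

for name, want in expected.items():
    try:
        have = pkg_resources.get_distribution(name).version
    except pkg_resources.DistributionNotFound:
        have = None
    print("%s: installed=%s expected=%s" % (name, have, want))
```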
setuptools.SetuptoolsDeprecationWarning, /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/command/easy_install.py:147: EasyInstallDeprecationWarning: easy_install command is deprecated. Use build and pip and other standards-based tools. EasyInstallDeprecationWarning, /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, [1/1] Cythonizing dgl/_ffi/_cython/core.pyx running build_ext building 'dgl._ffi._cy3.core' extension gcc -pthread -B /opt/conda/envs/tensorflow-ci/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I../include/ -I../third_party/dmlc-core/include -I../third_party/dlpack/include -I/opt/conda/envs/tensorflow-ci/include/python3.7m -c dgl/_ffi/_cython/core.cpp -o build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++ g++ -pthread -B /opt/conda/envs/tensorflow-ci/compiler_compat -Wl,--sysroot=/ -pthread -shared -B /opt/conda/envs/tensorflow-ci/compiler_compat -L/opt/conda/envs/tensorflow-ci/lib -Wl,-rpath=/opt/conda/envs/tensorflow-ci/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/dgl/_ffi/_cython/core.o -Ldgl -L../build/Release -L../build -ldgl -o build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libstdc++.so: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010001 /opt/conda/envs/tensorflow-ci/compiler_compat/ld: warning: /opt/conda/envs/tensorflow-ci/lib/libgcc_s.so.1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0010002 copying build/lib.linux-x86_64-3.7/dgl/_ffi/_cy3/core.cpython-37m-x86_64-linux-gnu.so -> dgl/_ffi/_cy3 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! 
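Note: the `FutureWarning` about `language_level` (emitted once per cythonize pass, hence twice in this log) is informational: Cython falls back to Python 2 semantics when the directive is unset. The hypothetical `setup.py` fragment below shows the usual way to pin the directive; the extension name and include path mirror the gcc invocation above, but the real DGL build scripts may be organized differently.

```python
# Hypothetical fragment: pin Cython's language level so the
# "language_level not set" FutureWarning above goes away.
from setuptools import Extension, setup
from Cython.Build import cythonize

ext = Extension(
    "dgl._ffi._cy3.core",                   # extension name from the log
    sources=["dgl/_ffi/_cython/core.pyx"],  # source file from the log
    include_dirs=["../include/"],           # mirrors one of the -I flags above
)

setup(
    name="example",
    ext_modules=cythonize([ext], compiler_directives={"language_level": 3}),
)
```

Equivalently, a `# cython: language_level=3` directive at the top of `core.pyx` silences the warning without touching the build script.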
File: /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/_ffi/_cython/core.pyx tree = Parsing.p_module(s, pxd, full_module_name) ~/jenkins/workspace/dgl_PR-4648@2 [Pipeline] sh + ls -lh /usr/lib/x86_64-linux-gnu/ total 1.8G -rw-r--r-- 1 root root 496 May 3 10:19 Mcrt1.o -rw-r--r-- 1 root root 1.8K May 3 10:19 Scrt1.o drwxr-xr-x 1 root root 4.0K May 28 19:15 audit drwxr-xr-x 2 root root 4.0K Aug 16 08:53 avahi drwxr-xr-x 2 root root 4.0K Apr 28 00:03 coreutils -rw-r--r-- 1 root root 1.9K May 3 10:19 crt1.o -rw-r--r-- 1 root root 1.2K May 3 10:19 crti.o -rw-r--r-- 1 root root 648 May 3 10:19 crtn.o drwxr-xr-x 2 root root 4.0K Aug 16 08:53 dri drwxr-xr-x 2 root root 4.0K May 23 21:53 engines-1.1 drwxr-xr-x 1 root root 12K May 28 19:15 gconv -rw-r--r-- 1 root root 2.5K May 3 10:19 gcrt1.o drwxr-xr-x 2 root root 4.0K Aug 16 08:53 gdcm-2.8 drwxr-xr-x 3 root root 4.0K Aug 16 08:53 gdk-pixbuf-2.0 drwxr-xr-x 3 root root 4.0K Aug 16 08:53 gio drwxr-xr-x 2 root root 4.0K Aug 16 08:53 girepository-1.0 drwxr-xr-x 3 root root 4.0K Aug 16 08:53 glib-2.0 drwxr-xr-x 2 root root 4.0K Aug 16 08:54 graphviz -rw-r--r-- 1 root root 2.3K May 3 10:19 grcrt1.o drwxr-xr-x 3 root root 4.0K Aug 16 08:53 gtk-2.0 drwxr-xr-x 3 root root 4.0K Aug 16 08:53 gtk-3.0 drwxr-xr-x 2 root root 4.0K Aug 16 08:53 hwloc drwxr-xr-x 3 root root 4.0K Aug 16 08:53 icu drwxr-xr-x 3 root root 4.0K Aug 16 08:53 krb5 drwxr-xr-x 2 root root 4.0K May 28 19:15 ldscripts -rw-r--r-- 1 root root 2.0K May 3 10:19 libBrokenLocale.a lrwxrwxrwx 1 root root 42 May 3 10:19 libBrokenLocale.so -> /lib/x86_64-linux-gnu/libBrokenLocale.so.1 lrwxrwxrwx 1 root root 16 May 23 2016 libCharLS.so.1 -> libCharLS.so.1.0 -rw-r--r-- 1 root root 219K May 23 2016 libCharLS.so.1.0 lrwxrwxrwx 1 root root 14 May 10 2019 libGL.so.1 -> libGL.so.1.0.0 -rw-r--r-- 1 root root 555K May 10 2019 libGL.so.1.0.0 lrwxrwxrwx 1 root root 15 May 10 2019 libGLX.so.0 -> libGLX.so.0.0.0 -rw-r--r-- 1 root root 67K May 10 2019 libGLX.so.0.0.0 lrwxrwxrwx 1 root root 16 Jun 12 2020 libGLX_indirect.so.0 -> libGLX_mesa.so.0 lrwxrwxrwx 1 root root 20 Jun 12 2020 libGLX_mesa.so.0 -> libGLX_mesa.so.0.0.0 -rw-r--r-- 1 root root 477K Jun 12 2020 libGLX_mesa.so.0.0.0 lrwxrwxrwx 1 root root 22 May 10 2019 libGLdispatch.so.0 -> libGLdispatch.so.0.0.0 -rw-r--r-- 1 root root 599K May 10 2019 libGLdispatch.so.0.0.0 lrwxrwxrwx 1 root root 17 Apr 16 2016 libHalf.so -> libHalf.so.12.0.0 lrwxrwxrwx 1 root root 17 Apr 16 2016 libHalf.so.12 -> libHalf.so.12.0.0 -rw-r--r-- 1 root root 267K Apr 16 2016 libHalf.so.12.0.0 lrwxrwxrwx 1 root root 15 Feb 28 2017 libICE.so.6 -> libICE.so.6.3.0 -rw-r--r-- 1 root root 96K Feb 28 2017 libICE.so.6.3.0 lrwxrwxrwx 1 root root 20 Apr 16 2016 libIex-2_2.so.12 -> libIex-2_2.so.12.0.0 -rw-r--r-- 1 root root 119K Apr 16 2016 libIex-2_2.so.12.0.0 lrwxrwxrwx 1 root root 20 Apr 16 2016 libIex.so -> libIex-2_2.so.12.0.0 lrwxrwxrwx 1 root root 24 Apr 16 2016 libIexMath-2_2.so.12 -> libIexMath-2_2.so.12.0.0 -rw-r--r-- 1 root root 15K Apr 16 2016 libIexMath-2_2.so.12.0.0 lrwxrwxrwx 1 root root 24 Apr 16 2016 libIexMath.so -> libIexMath-2_2.so.12.0.0 lrwxrwxrwx 1 root root 23 Nov 16 2021 libIlmImf-2_2.so.22 -> libIlmImf-2_2.so.22.0.0 -rw-r--r-- 1 root root 2.8M Nov 16 2021 libIlmImf-2_2.so.22.0.0 -rw-r--r-- 1 root root 4.4M Nov 16 2021 libIlmImf.a lrwxrwxrwx 1 root root 23 Nov 16 2021 libIlmImf.so -> libIlmImf-2_2.so.22.0.0 lrwxrwxrwx 1 root root 27 Nov 16 2021 libIlmImfUtil-2_2.so.22 -> libIlmImfUtil-2_2.so.22.0.0 -rw-r--r-- 1 root root 140K Nov 16 2021 libIlmImfUtil-2_2.so.22.0.0 
-rw-r--r-- 1 root root 319K Nov 16 2021 libIlmImfUtil.a lrwxrwxrwx 1 root root 27 Nov 16 2021 libIlmImfUtil.so -> libIlmImfUtil-2_2.so.22.0.0 lrwxrwxrwx 1 root root 26 Apr 16 2016 libIlmThread-2_2.so.12 -> libIlmThread-2_2.so.12.0.0 -rw-r--r-- 1 root root 27K Apr 16 2016 libIlmThread-2_2.so.12.0.0 lrwxrwxrwx 1 root root 26 Apr 16 2016 libIlmThread.so -> libIlmThread-2_2.so.12.0.0 lrwxrwxrwx 1 root root 22 Apr 16 2016 libImath-2_2.so.12 -> libImath-2_2.so.12.0.0 -rw-r--r-- 1 root root 71K Apr 16 2016 libImath-2_2.so.12.0.0 lrwxrwxrwx 1 root root 22 Apr 16 2016 libImath.so -> libImath-2_2.so.12.0.0 lrwxrwxrwx 1 root root 15 Jul 14 2020 libLLVM-10.so -> libLLVM-10.so.1 -rw-r--r-- 1 root root 71M Jul 14 2020 libLLVM-10.so.1 lrwxrwxrwx 1 root root 18 Apr 5 2017 libOpenCL.so.1 -> libOpenCL.so.1.0.0 -rw-r--r-- 1 root root 43K Apr 5 2017 libOpenCL.so.1.0.0 lrwxrwxrwx 1 root root 14 Jul 13 2014 libSM.so.6 -> libSM.so.6.0.1 -rw-r--r-- 1 root root 31K Jul 13 2014 libSM.so.6.0.1 lrwxrwxrwx 1 root root 19 May 19 2021 libX11-xcb.so.1 -> libX11-xcb.so.1.0.0 -rw-r--r-- 1 root root 5.7K May 19 2021 libX11-xcb.so.1.0.0 lrwxrwxrwx 1 root root 15 May 19 2021 libX11.so.6 -> libX11.so.6.3.0 -rw-r--r-- 1 root root 1.3M May 19 2021 libX11.so.6.3.0 lrwxrwxrwx 1 root root 15 Apr 21 2020 libXau.so.6 -> libXau.so.6.0.0 -rw-r--r-- 1 root root 15K Apr 21 2020 libXau.so.6.0.0 lrwxrwxrwx 1 root root 12 Aug 3 2015 libXaw.so.7 -> libXaw7.so.7 lrwxrwxrwx 1 root root 16 Aug 3 2015 libXaw7.so.7 -> libXaw7.so.7.0.0 -rw-r--r-- 1 root root 461K Aug 3 2015 libXaw7.so.7.0.0 lrwxrwxrwx 1 root root 22 Jan 20 2017 libXcomposite.so.1 -> libXcomposite.so.1.0.0 -rw-r--r-- 1 root root 11K Jan 20 2017 libXcomposite.so.1.0.0 lrwxrwxrwx 1 root root 19 Dec 19 2017 libXcursor.so.1 -> libXcursor.so.1.0.2 -rw-r--r-- 1 root root 39K Dec 19 2017 libXcursor.so.1.0.2 lrwxrwxrwx 1 root root 19 Aug 20 2017 libXdamage.so.1 -> libXdamage.so.1.1.0 -rw-r--r-- 1 root root 11K Aug 20 2017 libXdamage.so.1.1.0 lrwxrwxrwx 1 root root 17 Mar 2 2017 libXdmcp.so.6 -> libXdmcp.so.6.0.0 -rw-r--r-- 1 root root 23K Mar 2 2017 libXdmcp.so.6.0.0 lrwxrwxrwx 1 root root 16 Oct 25 2014 libXext.so.6 -> libXext.so.6.4.0 -rw-r--r-- 1 root root 72K Oct 25 2014 libXext.so.6.4.0 lrwxrwxrwx 1 root root 18 Dec 5 2016 libXfixes.so.3 -> libXfixes.so.3.1.0 -rw-r--r-- 1 root root 23K Dec 5 2016 libXfixes.so.3.1.0 lrwxrwxrwx 1 root root 15 Jul 13 2014 libXft.so.2 -> libXft.so.2.3.2 -rw-r--r-- 1 root root 84K Jul 13 2014 libXft.so.2.3.2 lrwxrwxrwx 1 root root 14 Jan 23 2017 libXi.so.6 -> libXi.so.6.1.0 -rw-r--r-- 1 root root 63K Jan 23 2017 libXi.so.6.1.0 lrwxrwxrwx 1 root root 20 Jun 30 2013 libXinerama.so.1 -> libXinerama.so.1.0.0 -rw-r--r-- 1 root root 11K Jun 30 2013 libXinerama.so.1.0.0 lrwxrwxrwx 1 root root 15 Dec 10 2015 libXmu.so.6 -> libXmu.so.6.2.0 -rw-r--r-- 1 root root 101K Dec 10 2015 libXmu.so.6.2.0 lrwxrwxrwx 1 root root 16 Dec 22 2016 libXpm.so.4 -> libXpm.so.4.11.0 -rw-r--r-- 1 root root 71K Dec 22 2016 libXpm.so.4.11.0 lrwxrwxrwx 1 root root 18 Dec 6 2016 libXrandr.so.2 -> libXrandr.so.2.2.0 -rw-r--r-- 1 root root 43K Dec 6 2016 libXrandr.so.2.2.0 lrwxrwxrwx 1 root root 19 Dec 5 2016 libXrender.so.1 -> libXrender.so.1.3.0 -rw-r--r-- 1 root root 39K Dec 5 2016 libXrender.so.1.3.0 lrwxrwxrwx 1 root root 15 May 9 2012 libXss.so.1 -> libXss.so.1.0.0 -rw-r--r-- 1 root root 15K May 9 2012 libXss.so.1.0.0 lrwxrwxrwx 1 root root 14 May 24 2016 libXt.so.6 -> libXt.so.6.0.0 -rw-r--r-- 1 root root 416K May 24 2016 libXt.so.6.0.0 lrwxrwxrwx 1 root root 19 May 6 2015 
libXxf86vm.so.1 -> libXxf86vm.so.1.0.0 -rw-r--r-- 1 root root 23K May 6 2015 libXxf86vm.so.1.0.0 lrwxrwxrwx 1 root root 15 Jul 28 2017 libaec.so.0 -> libaec.so.0.0.3 -rw-r--r-- 1 root root 30K Jul 28 2017 libaec.so.0.0.3 -rw-r--r-- 1 root root 23K May 3 10:19 libanl.a lrwxrwxrwx 1 root root 33 May 3 10:19 libanl.so -> /lib/x86_64-linux-gnu/libanl.so.1 lrwxrwxrwx 1 root root 20 Jun 15 2021 libapt-inst.so.2.0 -> libapt-inst.so.2.0.0 -rw-r--r-- 1 root root 51K Jun 15 2021 libapt-inst.so.2.0.0 lrwxrwxrwx 1 root root 19 Jun 15 2021 libapt-pkg.so.5.0 -> libapt-pkg.so.5.0.2 -rw-r--r-- 1 root root 1.8M Jun 15 2021 libapt-pkg.so.5.0.2 lrwxrwxrwx 1 root root 23 Jun 15 2021 libapt-private.so.0.0 -> libapt-private.so.0.0.0 -rw-r--r-- 1 root root 415K Jun 15 2021 libapt-private.so.0.0.0 lrwxrwxrwx 1 root root 18 Jan 24 2018 libarpack.so.2 -> libarpack.so.2.0.0 -rw-r--r-- 1 root root 295K Jan 24 2018 libarpack.so.2.0.0 lrwxrwxrwx 1 root root 16 Dec 4 2019 libasan.so.4 -> libasan.so.4.0.0 -rw-r--r-- 1 root root 1.4M Dec 4 2019 libasan.so.4.0.0 lrwxrwxrwx 1 root root 16 Dec 15 2017 libasn1.so.8 -> libasn1.so.8.0.0 -rw-r--r-- 1 root root 647K Dec 15 2017 libasn1.so.8.0.0 lrwxrwxrwx 1 root root 18 Feb 6 2018 libassuan.so.0 -> libassuan.so.0.8.1 -rw-r--r-- 1 root root 75K Feb 6 2018 libassuan.so.0.8.1 lrwxrwxrwx 1 root root 23 Mar 13 2018 libatk-1.0.so.0 -> libatk-1.0.so.0.22810.1 -rw-r--r-- 1 root root 150K Mar 13 2018 libatk-1.0.so.0.22810.1 lrwxrwxrwx 1 root root 26 Mar 13 2018 libatk-bridge-2.0.so.0 -> libatk-bridge-2.0.so.0.0.0 -rw-r--r-- 1 root root 194K Mar 13 2018 libatk-bridge-2.0.so.0.0.0 lrwxrwxrwx 1 root root 18 Mar 10 2020 libatomic.so.1 -> libatomic.so.1.2.0 -rw-r--r-- 1 root root 27K Mar 10 2020 libatomic.so.1.2.0 lrwxrwxrwx 1 root root 17 Mar 13 2018 libatspi.so.0 -> libatspi.so.0.0.1 -rw-r--r-- 1 root root 191K Mar 13 2018 libatspi.so.0.0.1 lrwxrwxrwx 1 root root 24 Jul 6 2021 libavahi-client.so.3 -> libavahi-client.so.3.2.9 -rw-r--r-- 1 root root 67K Jul 6 2021 libavahi-client.so.3.2.9 lrwxrwxrwx 1 root root 24 Jul 6 2021 libavahi-common.so.3 -> libavahi-common.so.3.5.3 -rw-r--r-- 1 root root 47K Jul 6 2021 libavahi-common.so.3.5.3 -rw-r--r-- 1 root root 19M May 18 20:01 libavcodec.a lrwxrwxrwx 1 root root 24 May 18 20:01 libavcodec.so -> libavcodec.so.57.107.100 lrwxrwxrwx 1 root root 24 May 18 20:01 libavcodec.so.57 -> libavcodec.so.57.107.100 -rw-r--r-- 1 root root 14M May 18 20:01 libavcodec.so.57.107.100 -rw-r--r-- 1 root root 4.7M May 18 20:01 libavformat.a lrwxrwxrwx 1 root root 24 May 18 20:01 libavformat.so -> libavformat.so.57.83.100 lrwxrwxrwx 1 root root 24 May 18 20:01 libavformat.so.57 -> libavformat.so.57.83.100 -rw-r--r-- 1 root root 2.4M May 18 20:01 libavformat.so.57.83.100 -rw-r--r-- 1 root root 214K May 18 20:01 libavresample.a lrwxrwxrwx 1 root root 22 May 18 20:01 libavresample.so -> libavresample.so.3.7.0 lrwxrwxrwx 1 root root 22 May 18 20:01 libavresample.so.3 -> libavresample.so.3.7.0 -rw-r--r-- 1 root root 130K May 18 20:01 libavresample.so.3.7.0 -rw-r--r-- 1 root root 755K May 18 20:01 libavutil.a lrwxrwxrwx 1 root root 22 May 18 20:01 libavutil.so -> libavutil.so.55.78.100 lrwxrwxrwx 1 root root 22 May 18 20:01 libavutil.so.55 -> libavutil.so.55.78.100 -rw-r--r-- 1 root root 487K May 18 20:01 libavutil.so.55.78.100 -rw-r--r-- 1 root root 1.3M Oct 20 2021 libbfd-2.30-system.so lrwxrwxrwx 1 root root 44 Aug 16 08:54 libblas.a -> /etc/alternatives/libblas.a-x86_64-linux-gnu lrwxrwxrwx 1 root root 45 Aug 16 08:54 libblas.so -> 
/etc/alternatives/libblas.so-x86_64-linux-gnu lrwxrwxrwx 1 root root 47 Aug 16 08:54 libblas.so.3 -> /etc/alternatives/libblas.so.3-x86_64-linux-gnu lrwxrwxrwx 1 root root 18 Apr 3 2018 libbluray.so.2 -> libbluray.so.2.0.2 -rw-r--r-- 1 root root 319K Apr 3 2018 libbluray.so.2.0.2 -rw-r--r-- 1 root root 5.3M May 3 10:19 libc.a -rw-r--r-- 1 root root 298 May 3 10:19 libc.so -rw-r--r-- 1 root root 20K May 3 10:19 libc_nonshared.a lrwxrwxrwx 1 root root 29 Jan 22 2019 libcairo-gobject.so.2 -> libcairo-gobject.so.2.11510.0 -rw-r--r-- 1 root root 34K Jan 22 2019 libcairo-gobject.so.2.11510.0 lrwxrwxrwx 1 root root 21 Jan 22 2019 libcairo.so.2 -> libcairo.so.2.11510.0 -rw-r--r-- 1 root root 1.2M Jan 22 2019 libcairo.so.2.11510.0 lrwxrwxrwx 1 root root 15 Mar 10 2020 libcc1.so.0 -> libcc1.so.0.0.0 -rw-r--r-- 1 root root 116K Mar 10 2020 libcc1.so.0.0.0 lrwxrwxrwx 1 root root 15 Mar 24 2018 libcdt.so -> libcdt.so.5.0.0 lrwxrwxrwx 1 root root 15 Mar 24 2018 libcdt.so.5 -> libcdt.so.5.0.0 -rw-r--r-- 1 root root 27K Mar 24 2018 libcdt.so.5.0.0 lrwxrwxrwx 1 root root 18 Mar 24 2018 libcgraph.so -> libcgraph.so.6.0.0 lrwxrwxrwx 1 root root 18 Mar 24 2018 libcgraph.so.6 -> libcgraph.so.6.0.0 -rw-r--r-- 1 root root 88K Mar 24 2018 libcgraph.so.6.0.0 lrwxrwxrwx 1 root root 23 Feb 7 2018 libchromaprint.so.1 -> libchromaprint.so.1.4.3 -rw-r--r-- 1 root root 75K Feb 7 2018 libchromaprint.so.1.4.3 lrwxrwxrwx 1 root root 34 May 3 10:19 libcidn.so -> /lib/x86_64-linux-gnu/libcidn.so.1 lrwxrwxrwx 1 root root 19 Dec 4 2019 libcilkrts.so.5 -> libcilkrts.so.5.0.0 -rw-r--r-- 1 root root 117K Dec 4 2019 libcilkrts.so.5.0.0 lrwxrwxrwx 1 root root 18 Jul 23 2017 libcolord.so.2 -> libcolord.so.2.0.5 -rw-r--r-- 1 root root 126K Jul 23 2017 libcolord.so.2.0.5 lrwxrwxrwx 1 root root 25 Jul 23 2017 libcolordprivate.so.2 -> libcolordprivate.so.2.0.5 -rw-r--r-- 1 root root 188K Jul 23 2017 libcolordprivate.so.2.0.5 lrwxrwxrwx 1 root root 21 Dec 16 2017 libcroco-0.6.so.3 -> libcroco-0.6.so.3.0.1 -rw-r--r-- 1 root root 235K Dec 16 2017 libcroco-0.6.so.3.0.1 -rw-r--r-- 1 root root 61K May 3 10:19 libcrypt.a lrwxrwxrwx 1 root root 35 May 3 10:19 libcrypt.so -> /lib/x86_64-linux-gnu/libcrypt.so.1 -rw-r--r-- 1 root root 2.8M May 3 17:51 libcrypto.so.1.1 lrwxrwxrwx 1 root root 19 Mar 13 2017 libcrystalhd.so.3 -> libcrystalhd.so.3.6 -rw-r--r-- 1 root root 109K Mar 13 2017 libcrystalhd.so.3.6 lrwxrwxrwx 1 root root 15 Aug 9 2019 libcublas.so -> libcublas.so.10 lrwxrwxrwx 1 root root 23 Aug 9 2019 libcublas.so.10 -> libcublas.so.10.2.1.243 -rw-r--r-- 1 root root 60M Aug 9 2019 libcublas.so.10.2.1.243 lrwxrwxrwx 1 root root 17 Aug 9 2019 libcublasLt.so -> libcublasLt.so.10 lrwxrwxrwx 1 root root 25 Aug 9 2019 libcublasLt.so.10 -> libcublasLt.so.10.2.1.243 -rw-r--r-- 1 root root 29M Aug 9 2019 libcublasLt.so.10.2.1.243 -rw-r--r-- 1 root root 32M Aug 9 2019 libcublasLt_static.a -rw-r--r-- 1 root root 72M Aug 9 2019 libcublas_static.a lrwxrwxrwx 1 root root 29 May 28 20:19 libcudnn.so -> /etc/alternatives/libcudnn_so lrwxrwxrwx 1 root root 17 Oct 27 2019 libcudnn.so.7 -> libcudnn.so.7.6.5 -rw-r--r-- 1 root root 409M Oct 27 2019 libcudnn.so.7.6.5 lrwxrwxrwx 1 root root 32 May 28 20:19 libcudnn_static.a -> /etc/alternatives/libcudnn_stlib -rw-r--r-- 1 root root 386M Oct 27 2019 libcudnn_static_v7.a -rw-r--r-- 1 root root 558K May 27 15:03 libcups.so.2 lrwxrwxrwx 1 root root 19 Jun 22 16:00 libcurl-gnutls.so.3 -> libcurl-gnutls.so.4 lrwxrwxrwx 1 root root 23 Jun 22 16:00 libcurl-gnutls.so.4 -> libcurl-gnutls.so.4.5.0 -rw-r--r-- 1 root root 
515K Jun 22 16:00 libcurl-gnutls.so.4.5.0 lrwxrwxrwx 1 root root 16 Jun 22 16:00 libcurl.so.4 -> libcurl.so.4.5.0 -rw-r--r-- 1 root root 519K Jun 22 16:00 libcurl.so.4.5.0 lrwxrwxrwx 1 root root 16 Feb 20 2018 libdap.so.25 -> libdap.so.25.0.1 -rw-r--r-- 1 root root 1.6M Feb 20 2018 libdap.so.25.0.1 lrwxrwxrwx 1 root root 21 Feb 20 2018 libdapclient.so.6 -> libdapclient.so.6.1.7 -rw-r--r-- 1 root root 259K Feb 20 2018 libdapclient.so.6.1.7 lrwxrwxrwx 1 root root 18 Feb 14 2018 libdatrie.so.1 -> libdatrie.so.1.3.3 -rw-r--r-- 1 root root 26K Feb 14 2018 libdatrie.so.1.3.3 -rw-r--r-- 1 root root 1.7M Jun 3 2019 libdb-5.3.so -rw-r--r-- 1 root root 365K Jan 15 2017 libdc1394.a lrwxrwxrwx 1 root root 19 Jan 15 2017 libdc1394.so -> libdc1394.so.22.2.1 lrwxrwxrwx 1 root root 19 Jan 15 2017 libdc1394.so.22 -> libdc1394.so.22.2.1 -rw-r--r-- 1 root root 216K Jan 15 2017 libdc1394.so.22.2.1 lrwxrwxrwx 1 root root 17 Mar 29 2018 libdconf.so.1 -> libdconf.so.1.0.0 -rw-r--r-- 1 root root 56K Mar 29 2018 libdconf.so.1.0.0 lrwxrwxrwx 1 root root 25 Jun 8 2016 libdebconfclient.so.0 -> libdebconfclient.so.0.0.0 -rw-r--r-- 1 root root 11K Jun 8 2016 libdebconfclient.so.0.0.0 -rw-r--r-- 1 root root 12K May 3 10:19 libdl.a lrwxrwxrwx 1 root root 32 May 3 10:19 libdl.so -> /lib/x86_64-linux-gnu/libdl.so.2 lrwxrwxrwx 1 root root 15 May 11 2020 libdrm.so.2 -> libdrm.so.2.4.0 -rw-r--r-- 1 root root 67K May 11 2020 libdrm.so.2.4.0 lrwxrwxrwx 1 root root 22 May 11 2020 libdrm_amdgpu.so.1 -> libdrm_amdgpu.so.1.0.0 -rw-r--r-- 1 root root 39K May 11 2020 libdrm_amdgpu.so.1.0.0 lrwxrwxrwx 1 root root 21 May 11 2020 libdrm_intel.so.1 -> libdrm_intel.so.1.0.0 -rw-r--r-- 1 root root 140K May 11 2020 libdrm_intel.so.1.0.0 lrwxrwxrwx 1 root root 23 May 11 2020 libdrm_nouveau.so.2 -> libdrm_nouveau.so.2.0.0 -rw-r--r-- 1 root root 31K May 11 2020 libdrm_nouveau.so.2.0.0 lrwxrwxrwx 1 root root 22 May 11 2020 libdrm_radeon.so.1 -> libdrm_radeon.so.1.0.1 -rw-r--r-- 1 root root 47K May 11 2020 libdrm_radeon.so.1.0.1 lrwxrwxrwx 1 root root 17 Jun 19 2017 libedit.so.2 -> libedit.so.2.0.56 -rw-r--r-- 1 root root 204K Jun 19 2017 libedit.so.2.0.56 -rw-r--r-- 1 root root 103K Jun 7 2019 libelf-0.170.so lrwxrwxrwx 1 root root 15 Jun 7 2019 libelf.so.1 -> libelf-0.170.so lrwxrwxrwx 1 root root 17 Nov 13 2017 libepoxy.so.0 -> libepoxy.so.0.0.0 -rw-r--r-- 1 root root 1.1M Nov 13 2017 libepoxy.so.0.0.0 lrwxrwxrwx 1 root root 19 Apr 27 2016 libepsilon.so.1 -> libepsilon.so.1.0.0 -rw-r--r-- 1 root root 97K Apr 27 2016 libepsilon.so.1.0.0 -rw-r--r-- 1 root root 400K Nov 6 2020 libexif.a lrwxrwxrwx 1 root root 17 Nov 6 2020 libexif.so -> libexif.so.12.3.3 lrwxrwxrwx 1 root root 17 Nov 6 2020 libexif.so.12 -> libexif.so.12.3.3 -rw-r--r-- 1 root root 274K Nov 6 2020 libexif.so.12.3.3 lrwxrwxrwx 1 root root 18 Mar 8 2022 libexpatw.so.1 -> libexpatw.so.1.6.7 -rw-r--r-- 1 root root 203K Mar 8 2022 libexpatw.so.1.6.7 lrwxrwxrwx 1 root root 18 Jan 4 2018 libfabric.so.1 -> libfabric.so.1.9.3 -rw-r--r-- 1 root root 676K Jan 4 2018 libfabric.so.1.9.3 lrwxrwxrwx 1 root root 15 Jan 7 2018 libffi.so.6 -> libffi.so.6.0.4 -rw-r--r-- 1 root root 31K Jan 7 2018 libffi.so.6.0.4 lrwxrwxrwx 1 root root 23 Apr 5 2018 libfontconfig.so.1 -> libfontconfig.so.1.10.1 -rw-r--r-- 1 root root 276K Apr 5 2018 libfontconfig.so.1.10.1 lrwxrwxrwx 1 root root 14 May 23 2018 libform.so.5 -> libform.so.5.9 -rw-r--r-- 1 root root 60K May 23 2018 libform.so.5.9 lrwxrwxrwx 1 root root 15 May 23 2018 libformw.so.5 -> libformw.so.5.9 -rw-r--r-- 1 root root 68K May 23 2018 
libformw.so.5.9 lrwxrwxrwx 1 root root 18 Jul 6 11:25 libfreebl3.chk -> nss/libfreebl3.chk lrwxrwxrwx 1 root root 17 Jul 6 11:25 libfreebl3.so -> nss/libfreebl3.so lrwxrwxrwx 1 root root 22 Jul 6 11:25 libfreeblpriv3.chk -> nss/libfreeblpriv3.chk lrwxrwxrwx 1 root root 21 Jul 6 11:25 libfreeblpriv3.so -> nss/libfreeblpriv3.so lrwxrwxrwx 1 root root 21 Jul 19 16:39 libfreetype.so.6 -> libfreetype.so.6.15.0 -rw-r--r-- 1 root root 719K Jul 19 16:39 libfreetype.so.6.15.0 lrwxrwxrwx 1 root root 18 Feb 22 2018 libfreexl.so.1 -> libfreexl.so.1.1.0 -rw-r--r-- 1 root root 34K Feb 22 2018 libfreexl.so.1.1.0 lrwxrwxrwx 1 root root 16 Aug 19 2016 libfyba.so.0 -> libfyba.so.0.0.0 -rw-r--r-- 1 root root 215K Aug 19 2016 libfyba.so.0.0.0 lrwxrwxrwx 1 root root 16 Aug 19 2016 libfygm.so.0 -> libfygm.so.0.0.0 -rw-r--r-- 1 root root 27K Aug 19 2016 libfygm.so.0.0.0 lrwxrwxrwx 1 root root 16 Aug 19 2016 libfyut.so.0 -> libfyut.so.0.0.0 -rw-r--r-- 1 root root 35K Aug 19 2016 libfyut.so.0.0.0 -rw-r--r-- 1 root root 1.2K May 3 10:19 libg.a lrwxrwxrwx 1 root root 14 Aug 30 2021 libgd.so.3 -> libgd.so.3.0.5 -rw-r--r-- 1 root root 397K Aug 30 2021 libgd.so.3.0.5 lrwxrwxrwx 1 root root 16 Mar 14 2018 libgdbm.so.5 -> libgdbm.so.5.0.0 -rw-r--r-- 1 root root 51K Mar 14 2018 libgdbm.so.5.0.0 lrwxrwxrwx 1 root root 23 Mar 14 2018 libgdbm_compat.so.4 -> libgdbm_compat.so.4.0.0 -rw-r--r-- 1 root root 14K Mar 14 2018 libgdbm_compat.so.4.0.0 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmCommon.so -> libgdcmCommon.so.2.8 lrwxrwxrwx 1 root root 22 Feb 5 2018 libgdcmCommon.so.2.8 -> libgdcmCommon.so.2.8.4 -rw-r--r-- 1 root root 155K Feb 5 2018 libgdcmCommon.so.2.8.4 lrwxrwxrwx 1 root root 18 Feb 5 2018 libgdcmDICT.so -> libgdcmDICT.so.2.8 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmDICT.so.2.8 -> libgdcmDICT.so.2.8.4 -rw-r--r-- 1 root root 2.4M Feb 5 2018 libgdcmDICT.so.2.8.4 lrwxrwxrwx 1 root root 18 Feb 5 2018 libgdcmDSED.so -> libgdcmDSED.so.2.8 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmDSED.so.2.8 -> libgdcmDSED.so.2.8.4 -rw-r--r-- 1 root root 1.1M Feb 5 2018 libgdcmDSED.so.2.8.4 lrwxrwxrwx 1 root root 17 Feb 5 2018 libgdcmIOD.so -> libgdcmIOD.so.2.8 lrwxrwxrwx 1 root root 19 Feb 5 2018 libgdcmIOD.so.2.8 -> libgdcmIOD.so.2.8.4 -rw-r--r-- 1 root root 87K Feb 5 2018 libgdcmIOD.so.2.8.4 lrwxrwxrwx 1 root root 18 Feb 5 2018 libgdcmMEXD.so -> libgdcmMEXD.so.2.8 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmMEXD.so.2.8 -> libgdcmMEXD.so.2.8.4 -rw-r--r-- 1 root root 1.1M Feb 5 2018 libgdcmMEXD.so.2.8.4 lrwxrwxrwx 1 root root 18 Feb 5 2018 libgdcmMSFF.so -> libgdcmMSFF.so.2.8 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmMSFF.so.2.8 -> libgdcmMSFF.so.2.8.4 -rw-r--r-- 1 root root 2.6M Feb 5 2018 libgdcmMSFF.so.2.8.4 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmjpeg12.so -> libgdcmjpeg12.so.2.8 lrwxrwxrwx 1 root root 22 Feb 5 2018 libgdcmjpeg12.so.2.8 -> libgdcmjpeg12.so.2.8.4 -rw-r--r-- 1 root root 158K Feb 5 2018 libgdcmjpeg12.so.2.8.4 lrwxrwxrwx 1 root root 20 Feb 5 2018 libgdcmjpeg16.so -> libgdcmjpeg16.so.2.8 lrwxrwxrwx 1 root root 22 Feb 5 2018 libgdcmjpeg16.so.2.8 -> libgdcmjpeg16.so.2.8.4 -rw-r--r-- 1 root root 158K Feb 5 2018 libgdcmjpeg16.so.2.8.4 lrwxrwxrwx 1 root root 19 Feb 5 2018 libgdcmjpeg8.so -> libgdcmjpeg8.so.2.8 lrwxrwxrwx 1 root root 21 Feb 5 2018 libgdcmjpeg8.so.2.8 -> libgdcmjpeg8.so.2.8.4 -rw-r--r-- 1 root root 158K Feb 5 2018 libgdcmjpeg8.so.2.8.4 lrwxrwxrwx 1 root root 21 Jun 5 2019 libgdk-3.so.0 -> libgdk-3.so.0.2200.30 -rw-r--r-- 1 root root 981K Jun 5 2019 libgdk-3.so.0.2200.30 lrwxrwxrwx 1 root root 27 Mar 
24 2018 libgdk-x11-2.0.so.0 -> libgdk-x11-2.0.so.0.2400.32 -rw-r--r-- 1 root root 724K Mar 24 2018 libgdk-x11-2.0.so.0.2400.32 lrwxrwxrwx 1 root root 29 Mar 16 2018 libgdk_pixbuf-2.0.so.0 -> libgdk_pixbuf-2.0.so.0.3611.0 -rw-r--r-- 1 root root 144K Mar 16 2018 libgdk_pixbuf-2.0.so.0.3611.0 lrwxrwxrwx 1 root root 34 Mar 16 2018 libgdk_pixbuf_xlib-2.0.so.0 -> libgdk_pixbuf_xlib-2.0.so.0.3611.0 -rw-r--r-- 1 root root 67K Mar 16 2018 libgdk_pixbuf_xlib-2.0.so.0.3611.0 -rw-r--r-- 1 root root 1.6M Mar 1 2018 libgeos-3.6.2.so lrwxrwxrwx 1 root root 19 Mar 1 2018 libgeos_c.so.1 -> libgeos_c.so.1.10.2 -rw-r--r-- 1 root root 187K Mar 1 2018 libgeos_c.so.1.10.2 lrwxrwxrwx 1 root root 19 Nov 17 2016 libgeotiff.so.2 -> libgeotiff.so.2.1.2 -rw-r--r-- 1 root root 215K Nov 17 2016 libgeotiff.so.2.1.2 lrwxrwxrwx 1 root root 20 Dec 4 2019 libgfortran.so.4 -> libgfortran.so.4.0.0 -rw-r--r-- 1 root root 1.9M Dec 4 2019 libgfortran.so.4.0.0 lrwxrwxrwx 1 root root 15 Aug 19 2019 libgif.so.7 -> libgif.so.7.0.0 -rw-r--r-- 1 root root 34K Aug 19 2019 libgif.so.7.0.0 -rw-r--r-- 1 root root 4.0M Nov 29 2021 libgio-2.0.a lrwxrwxrwx 1 root root 22 Nov 29 2021 libgio-2.0.so -> libgio-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 22 Nov 29 2021 libgio-2.0.so.0 -> libgio-2.0.so.0.5600.4 -rw-r--r-- 1 root root 1.7M Nov 29 2021 libgio-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 28 Apr 15 2018 libgirepository-1.0.so.1 -> libgirepository-1.0.so.1.0.0 -rw-r--r-- 1 root root 208K Apr 15 2018 libgirepository-1.0.so.1.0.0 lrwxrwxrwx 1 root root 17 Sep 27 2017 libgl2ps.so.1.4 -> libgl2ps.so.1.4.0 -rw-r--r-- 1 root root 79K Sep 27 2017 libgl2ps.so.1.4.0 lrwxrwxrwx 1 root root 17 Jun 12 2020 libglapi.so.0 -> libglapi.so.0.0.0 -rw-r--r-- 1 root root 215K Jun 12 2020 libglapi.so.0.0.0 -rw-r--r-- 1 root root 2.0M Nov 29 2021 libglib-2.0.a lrwxrwxrwx 1 root root 23 Nov 29 2021 libglib-2.0.so -> libglib-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 23 Nov 29 2021 libglib-2.0.so.0 -> libglib-2.0.so.0.5600.4 -rw-r--r-- 1 root root 1.1M Nov 29 2021 libglib-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 15 Dec 7 2017 libgme.so.0 -> libgme.so.0.6.2 -rw-r--r-- 1 root root 303K Dec 7 2017 libgme.so.0.6.2 -rw-r--r-- 1 root root 16K Nov 29 2021 libgmodule-2.0.a lrwxrwxrwx 1 root root 26 Nov 29 2021 libgmodule-2.0.so -> libgmodule-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 26 Nov 29 2021 libgmodule-2.0.so.0 -> libgmodule-2.0.so.0.5600.4 -rw-r--r-- 1 root root 14K Nov 29 2021 libgmodule-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 16 Jan 24 2018 libgmp.so.10 -> libgmp.so.10.3.2 -rw-r--r-- 1 root root 515K Jan 24 2018 libgmp.so.10.3.2 lrwxrwxrwx 1 root root 21 Aug 25 2021 libgnutls.so.30 -> libgnutls.so.30.14.10 -rw-r--r-- 1 root root 1.4M Aug 25 2021 libgnutls.so.30.14.10 -rw-r--r-- 1 root root 667K Nov 29 2021 libgobject-2.0.a lrwxrwxrwx 1 root root 26 Nov 29 2021 libgobject-2.0.so -> libgobject-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 26 Nov 29 2021 libgobject-2.0.so.0 -> libgobject-2.0.so.0.5600.4 -rw-r--r-- 1 root root 335K Nov 29 2021 libgobject-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 16 Mar 10 2020 libgomp.so.1 -> libgomp.so.1.0.0 -rw-r--r-- 1 root root 188K Mar 10 2020 libgomp.so.1.0.0 drwxr-xr-x 3 root root 4.0K Aug 16 08:53 libgphoto2 drwxr-xr-x 3 root root 4.0K Aug 16 08:53 libgphoto2-dev -rw-r--r-- 1 root root 262K Dec 23 2017 libgphoto2.a lrwxrwxrwx 1 root root 19 Dec 23 2017 libgphoto2.so -> libgphoto2.so.6.0.0 lrwxrwxrwx 1 root root 19 Dec 23 2017 libgphoto2.so.6 -> libgphoto2.so.6.0.0 -rw-r--r-- 1 root root 143K Dec 23 2017 libgphoto2.so.6.0.0 drwxr-xr-x 3 root root 4.0K 
Aug 16 08:53 libgphoto2_port -rw-r--r-- 1 root root 79K Dec 23 2017 libgphoto2_port.a lrwxrwxrwx 1 root root 25 Dec 23 2017 libgphoto2_port.so -> libgphoto2_port.so.12.0.0 lrwxrwxrwx 1 root root 25 Dec 23 2017 libgphoto2_port.so.12 -> libgphoto2_port.so.12.0.0 -rw-r--r-- 1 root root 43K Dec 23 2017 libgphoto2_port.so.12.0.0 lrwxrwxrwx 1 root root 17 Mar 11 2018 libgraphite2.so -> libgraphite2.so.3 lrwxrwxrwx 1 root root 17 Mar 11 2018 libgraphite2.so.2.0.0 -> libgraphite2.so.3 lrwxrwxrwx 1 root root 21 Mar 11 2018 libgraphite2.so.3 -> libgraphite2.so.3.0.1 -rw-r--r-- 1 root root 179K Mar 11 2018 libgraphite2.so.3.0.1 lrwxrwxrwx 1 root root 16 Apr 3 2018 libgsm.so.1 -> libgsm.so.1.0.12 -rw-r--r-- 1 root root 51K Apr 3 2018 libgsm.so.1.0.12 lrwxrwxrwx 1 root root 18 Dec 15 2017 libgssapi.so.3 -> libgssapi.so.3.0.0 -rw-r--r-- 1 root root 260K Dec 15 2017 libgssapi.so.3.0.0 lrwxrwxrwx 1 root root 21 Nov 11 2020 libgssapi_krb5.so.2 -> libgssapi_krb5.so.2.2 -rw-r--r-- 1 root root 299K Nov 11 2020 libgssapi_krb5.so.2.2 -rw-r--r-- 1 root root 2.7K Nov 29 2021 libgthread-2.0.a lrwxrwxrwx 1 root root 26 Nov 29 2021 libgthread-2.0.so -> libgthread-2.0.so.0.5600.4 lrwxrwxrwx 1 root root 26 Nov 29 2021 libgthread-2.0.so.0 -> libgthread-2.0.so.0.5600.4 -rw-r--r-- 1 root root 5.9K Nov 29 2021 libgthread-2.0.so.0.5600.4 drwxr-xr-x 2 root root 4.0K Aug 16 08:53 libgtk-3-0 lrwxrwxrwx 1 root root 21 Jun 5 2019 libgtk-3.so.0 -> libgtk-3.so.0.2200.30 -rw-r--r-- 1 root root 7.1M Jun 5 2019 libgtk-3.so.0.2200.30 lrwxrwxrwx 1 root root 27 Mar 24 2018 libgtk-x11-2.0.so.0 -> libgtk-x11-2.0.so.0.2400.32 -rw-r--r-- 1 root root 4.3M Mar 24 2018 libgtk-x11-2.0.so.0.2400.32 drwxr-xr-x 2 root root 4.0K Aug 16 08:53 libgtk2.0-0 lrwxrwxrwx 1 root root 19 Feb 3 2017 libgts-0.7.so.5 -> libgts-0.7.so.5.0.1 -rw-r--r-- 1 root root 371K Feb 3 2017 libgts-0.7.so.5.0.1 lrwxrwxrwx 1 root root 15 Mar 24 2018 libgvc.so -> libgvc.so.6.0.0 lrwxrwxrwx 1 root root 15 Mar 24 2018 libgvc.so.6 -> libgvc.so.6.0.0 -rw-r--r-- 1 root root 613K Mar 24 2018 libgvc.so.6.0.0 lrwxrwxrwx 1 root root 16 Mar 24 2018 libgvpr.so -> libgvpr.so.2.0.0 lrwxrwxrwx 1 root root 16 Mar 24 2018 libgvpr.so.2 -> libgvpr.so.2.0.0 -rw-r--r-- 1 root root 480K Mar 24 2018 libgvpr.so.2.0.0 -rw-r--r-- 1 root root 67K Apr 13 2018 libharfbuzz-gobject.a lrwxrwxrwx 1 root root 32 Apr 13 2018 libharfbuzz-gobject.so -> libharfbuzz-gobject.so.0.10702.0 lrwxrwxrwx 1 root root 32 Apr 13 2018 libharfbuzz-gobject.so.0 -> libharfbuzz-gobject.so.0.10702.0 -rw-r--r-- 1 root root 55K Apr 13 2018 libharfbuzz-gobject.so.0.10702.0 -rw-r--r-- 1 root root 7.0K Apr 13 2018 libharfbuzz-icu.a lrwxrwxrwx 1 root root 28 Apr 13 2018 libharfbuzz-icu.so -> libharfbuzz-icu.so.0.10702.0 lrwxrwxrwx 1 root root 28 Apr 13 2018 libharfbuzz-icu.so.0 -> libharfbuzz-icu.so.0.10702.0 -rw-r--r-- 1 root root 11K Apr 13 2018 libharfbuzz-icu.so.0.10702.0 -rw-r--r-- 1 root root 965K Apr 13 2018 libharfbuzz.a lrwxrwxrwx 1 root root 24 Apr 13 2018 libharfbuzz.so -> libharfbuzz.so.0.10702.0 lrwxrwxrwx 1 root root 24 Apr 13 2018 libharfbuzz.so.0 -> libharfbuzz.so.0.10702.0 -rw-r--r-- 1 root root 631K Apr 13 2018 libharfbuzz.so.0.10702.0 lrwxrwxrwx 1 root root 19 Dec 15 2017 libhcrypto.so.4 -> libhcrypto.so.4.1.0 -rw-r--r-- 1 root root 213K Dec 15 2017 libhcrypto.so.4.1.0 lrwxrwxrwx 1 root root 26 Aug 13 2017 libhdf5_openmpi.so.100 -> libhdf5_openmpi.so.100.0.1 -rw-r--r-- 1 root root 3.5M Aug 13 2017 libhdf5_openmpi.so.100.0.1 lrwxrwxrwx 1 root root 34 Aug 13 2017 libhdf5_openmpi_fortran.so.100 -> 
libhdf5_openmpi_fortran.so.100.0.1 -rw-r--r-- 1 root root 249K Aug 13 2017 libhdf5_openmpi_fortran.so.100.0.1 lrwxrwxrwx 1 root root 29 Aug 13 2017 libhdf5_openmpi_hl.so.100 -> libhdf5_openmpi_hl.so.100.0.0 -rw-r--r-- 1 root root 136K Aug 13 2017 libhdf5_openmpi_hl.so.100.0.0 lrwxrwxrwx 1 root root 36 Aug 13 2017 libhdf5_openmpihl_fortran.so.100 -> libhdf5_openmpihl_fortran.so.100.0.0 -rw-r--r-- 1 root root 119K Aug 13 2017 libhdf5_openmpihl_fortran.so.100.0.0 lrwxrwxrwx 1 root root 25 Aug 13 2017 libhdf5_serial.so.100 -> libhdf5_serial.so.100.0.1 -rw-r--r-- 1 root root 3.4M Aug 13 2017 libhdf5_serial.so.100.0.1 lrwxrwxrwx 1 root root 33 Aug 13 2017 libhdf5_serial_fortran.so.100 -> libhdf5_serial_fortran.so.100.0.1 -rw-r--r-- 1 root root 245K Aug 13 2017 libhdf5_serial_fortran.so.100.0.1 lrwxrwxrwx 1 root root 28 Aug 13 2017 libhdf5_serial_hl.so.100 -> libhdf5_serial_hl.so.100.0.0 -rw-r--r-- 1 root root 136K Aug 13 2017 libhdf5_serial_hl.so.100.0.0 lrwxrwxrwx 1 root root 35 Aug 13 2017 libhdf5_serialhl_fortran.so.100 -> libhdf5_serialhl_fortran.so.100.0.0 -rw-r--r-- 1 root root 119K Aug 13 2017 libhdf5_serialhl_fortran.so.100.0.0 lrwxrwxrwx 1 root root 20 Dec 15 2017 libheimbase.so.1 -> libheimbase.so.1.0.0 -rw-r--r-- 1 root root 59K Dec 15 2017 libheimbase.so.1.0.0 lrwxrwxrwx 1 root root 20 Dec 15 2017 libheimntlm.so.0 -> libheimntlm.so.0.1.0 -rw-r--r-- 1 root root 35K Dec 15 2017 libheimntlm.so.0.1.0 lrwxrwxrwx 1 root root 17 Jun 14 2021 libhogweed.so.4 -> libhogweed.so.4.5 -rw-r--r-- 1 root root 215K Jun 14 2021 libhogweed.so.4.5 lrwxrwxrwx 1 root root 13 Jan 19 2018 libhwloc.so.0 -> libhwloc.so.5 lrwxrwxrwx 1 root root 13 Jan 19 2018 libhwloc.so.1 -> libhwloc.so.5 lrwxrwxrwx 1 root root 13 Jan 19 2018 libhwloc.so.2 -> libhwloc.so.5 lrwxrwxrwx 1 root root 13 Jan 19 2018 libhwloc.so.3 -> libhwloc.so.5 lrwxrwxrwx 1 root root 13 Jan 19 2018 libhwloc.so.4 -> libhwloc.so.5 lrwxrwxrwx 1 root root 17 Jan 19 2018 libhwloc.so.5 -> libhwloc.so.5.7.6 -rw-r--r-- 1 root root 244K Jan 19 2018 libhwloc.so.5.7.6 lrwxrwxrwx 1 root root 17 Dec 15 2017 libhx509.so.5 -> libhx509.so.5.0.0 -rw-r--r-- 1 root root 294K Dec 15 2017 libhx509.so.5.0.0 lrwxrwxrwx 1 root root 22 Aug 5 2019 libibverbs.so.1 -> libibverbs.so.1.1.17.1 -rw-r--r-- 1 root root 87K Aug 5 2019 libibverbs.so.1.1.17.1 -rw-r--r-- 1 root root 55K Dec 9 2017 libicu-le-hb.a lrwxrwxrwx 1 root root 21 Dec 9 2017 libicu-le-hb.so -> libicu-le-hb.so.0.0.0 lrwxrwxrwx 1 root root 21 Dec 9 2017 libicu-le-hb.so.0 -> libicu-le-hb.so.0.0.0 -rw-r--r-- 1 root root 35K Dec 9 2017 libicu-le-hb.so.0.0.0 -rw-r--r-- 1 root root 26M Oct 19 2021 libicudata.a lrwxrwxrwx 1 root root 18 Oct 19 2021 libicudata.so -> libicudata.so.60.2 lrwxrwxrwx 1 root root 18 Oct 19 2021 libicudata.so.60 -> libicudata.so.60.2 -rw-r--r-- 1 root root 26M Oct 19 2021 libicudata.so.60.2 -rw-r--r-- 1 root root 5.9M Oct 19 2021 libicui18n.a lrwxrwxrwx 1 root root 18 Oct 19 2021 libicui18n.so -> libicui18n.so.60.2 lrwxrwxrwx 1 root root 18 Oct 19 2021 libicui18n.so.60 -> libicui18n.so.60.2 -rw-r--r-- 1 root root 2.7M Oct 19 2021 libicui18n.so.60.2 -rw-r--r-- 1 root root 85K Oct 19 2021 libicuio.a lrwxrwxrwx 1 root root 16 Oct 19 2021 libicuio.so -> libicuio.so.60.2 lrwxrwxrwx 1 root root 16 Oct 19 2021 libicuio.so.60 -> libicuio.so.60.2 -rw-r--r-- 1 root root 55K Oct 19 2021 libicuio.so.60.2 -rw-r--r-- 1 root root 75K Oct 19 2021 libiculx.a lrwxrwxrwx 1 root root 16 Oct 19 2021 libiculx.so -> libiculx.so.60.2 lrwxrwxrwx 1 root root 16 Oct 19 2021 libiculx.so.60 -> libiculx.so.60.2 -rw-r--r-- 
[output truncated: a long-format (`ls -lh`-style) directory listing of the shared libraries and static archives present in the CI image, covering — among many others — ICU 60.2, LAPACK/OpenBLAS 0.2.20, Kerberos and OpenLDAP, the OpenMPI 2.x libraries, NCCL 2.8.3, the MySQL client, OpenCV 3.2 with its contrib modules, PCRE, libpng 16, FFmpeg's swscale/swresample, and the VTK 6.3 module set (Common, Filters, IO, and related) together with its Python 2.7 and Tcl wrapper libraries]
libvtkIOParallelTCL-6.3.so.6.3 -> libvtkIOParallelTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 139K Dec 17 2017 libvtkIOParallelTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkIOParallelXML-6.3.so.6.3 -> libvtkIOParallelXML-6.3.so.6.3.0 -rw-r--r-- 1 root root 204K Dec 17 2017 libvtkIOParallelXML-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkIOParallelXMLPython27D-6.3.so.6.3 -> libvtkIOParallelXMLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 95K Dec 17 2017 libvtkIOParallelXMLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOParallelXMLTCL-6.3.so.6.3 -> libvtkIOParallelXMLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 87K Dec 17 2017 libvtkIOParallelXMLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkIOPostgreSQL-6.3.so.6.3 -> libvtkIOPostgreSQL-6.3.so.6.3.0 -rw-r--r-- 1 root root 160K Dec 17 2017 libvtkIOPostgreSQL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkIOPostgreSQLPython27D-6.3.so.6.3 -> libvtkIOPostgreSQLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 73K Dec 17 2017 libvtkIOPostgreSQLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkIOPostgreSQLTCL-6.3.so.6.3 -> libvtkIOPostgreSQLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 59K Dec 17 2017 libvtkIOPostgreSQLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 24 Dec 17 2017 libvtkIOSQL-6.3.so.6.3 -> libvtkIOSQL-6.3.so.6.3.0 -rw-r--r-- 1 root root 277K Dec 17 2017 libvtkIOSQL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkIOSQLPython27D-6.3.so.6.3 -> libvtkIOSQLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 202K Dec 17 2017 libvtkIOSQLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOSQLTCL-6.3.so.6.3 -> libvtkIOSQLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 148K Dec 17 2017 libvtkIOSQLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkIOVPIC-6.3.so.6.3 -> libvtkIOVPIC-6.3.so.6.3.0 -rw-r--r-- 1 root root 63K Dec 17 2017 libvtkIOVPIC-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkIOVPICPython27D-6.3.so.6.3 -> libvtkIOVPICPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 44K Dec 17 2017 libvtkIOVPICPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 28 Dec 17 2017 libvtkIOVPICTCL-6.3.so.6.3 -> libvtkIOVPICTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 39K Dec 17 2017 libvtkIOVPICTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 26 Dec 17 2017 libvtkIOVideo-6.3.so.6.3 -> libvtkIOVideo-6.3.so.6.3.0 -rw-r--r-- 1 root root 71K Dec 17 2017 libvtkIOVideo-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOVideoPython27D-6.3.so.6.3 -> libvtkIOVideoPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 85K Dec 17 2017 libvtkIOVideoPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 29 Dec 17 2017 libvtkIOVideoTCL-6.3.so.6.3 -> libvtkIOVideoTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 43K Dec 17 2017 libvtkIOVideoTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 24 Dec 17 2017 libvtkIOXML-6.3.so.6.3 -> libvtkIOXML-6.3.so.6.3.0 -rw-r--r-- 1 root root 945K Dec 17 2017 libvtkIOXML-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkIOXMLParser-6.3.so.6.3 -> libvtkIOXMLParser-6.3.so.6.3.0 -rw-r--r-- 1 root root 112K Dec 17 2017 libvtkIOXMLParser-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkIOXMLParserPython27D-6.3.so.6.3 -> libvtkIOXMLParserPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 69K Dec 17 2017 libvtkIOXMLParserPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkIOXMLParserTCL-6.3.so.6.3 -> libvtkIOXMLParserTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 51K Dec 17 2017 libvtkIOXMLParserTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 
libvtkIOXMLPython27D-6.3.so.6.3 -> libvtkIOXMLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 375K Dec 17 2017 libvtkIOXMLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 27 Dec 17 2017 libvtkIOXMLTCL-6.3.so.6.3 -> libvtkIOXMLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 304K Dec 17 2017 libvtkIOXMLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 26 Dec 17 2017 libvtkIOXdmf2-6.3.so.6.3 -> libvtkIOXdmf2-6.3.so.6.3.0 -rw-r--r-- 1 root root 335K Dec 17 2017 libvtkIOXdmf2-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkIOXdmf2Python27D-6.3.so.6.3 -> libvtkIOXdmf2Python27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 93K Dec 17 2017 libvtkIOXdmf2Python27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkIOXdmfIITCL-6.3.so.6.3 -> libvtkIOXdmfIITCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 67K Dec 17 2017 libvtkIOXdmfIITCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkImagingColor-6.3.so.6.3 -> libvtkImagingColor-6.3.so.6.3.0 -rw-r--r-- 1 root root 280K Dec 17 2017 libvtkImagingColor-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkImagingColorPython27D-6.3.so.6.3 -> libvtkImagingColorPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 85K Dec 17 2017 libvtkImagingColorPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkImagingColorTCL-6.3.so.6.3 -> libvtkImagingColorTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 75K Dec 17 2017 libvtkImagingColorTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkImagingCore-6.3.so.6.3 -> libvtkImagingCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.9M Dec 17 2017 libvtkImagingCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkImagingCorePython27D-6.3.so.6.3 -> libvtkImagingCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 934K Dec 17 2017 libvtkImagingCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkImagingCoreTCL-6.3.so.6.3 -> libvtkImagingCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 625K Dec 17 2017 libvtkImagingCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkImagingFourier-6.3.so.6.3 -> libvtkImagingFourier-6.3.so.6.3.0 -rw-r--r-- 1 root root 187K Dec 17 2017 libvtkImagingFourier-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkImagingFourierPython27D-6.3.so.6.3 -> libvtkImagingFourierPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 103K Dec 17 2017 libvtkImagingFourierPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkImagingFourierTCL-6.3.so.6.3 -> libvtkImagingFourierTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 87K Dec 17 2017 libvtkImagingFourierTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkImagingGeneral-6.3.so.6.3 -> libvtkImagingGeneral-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.1M Dec 17 2017 libvtkImagingGeneral-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkImagingGeneralPython27D-6.3.so.6.3 -> libvtkImagingGeneralPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 378K Dec 17 2017 libvtkImagingGeneralPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkImagingGeneralTCL-6.3.so.6.3 -> libvtkImagingGeneralTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 348K Dec 17 2017 libvtkImagingGeneralTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkImagingHybrid-6.3.so.6.3 -> libvtkImagingHybrid-6.3.so.6.3.0 -rw-r--r-- 1 root root 505K Dec 17 2017 libvtkImagingHybrid-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkImagingHybridPython27D-6.3.so.6.3 -> libvtkImagingHybridPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 539K Dec 17 2017 libvtkImagingHybridPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkImagingHybridTCL-6.3.so.6.3 -> 
libvtkImagingHybridTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 315K Dec 17 2017 libvtkImagingHybridTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkImagingMath-6.3.so.6.3 -> libvtkImagingMath-6.3.so.6.3.0 -rw-r--r-- 1 root root 296K Dec 17 2017 libvtkImagingMath-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkImagingMathPython27D-6.3.so.6.3 -> libvtkImagingMathPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 143K Dec 17 2017 libvtkImagingMathPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkImagingMathTCL-6.3.so.6.3 -> libvtkImagingMathTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 99K Dec 17 2017 libvtkImagingMathTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkImagingMorphological-6.3.so.6.3 -> libvtkImagingMorphological-6.3.so.6.3.0 -rw-r--r-- 1 root root 536K Dec 17 2017 libvtkImagingMorphological-6.3.so.6.3.0 lrwxrwxrwx 1 root root 48 Dec 17 2017 libvtkImagingMorphologicalPython27D-6.3.so.6.3 -> libvtkImagingMorphologicalPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 205K Dec 17 2017 libvtkImagingMorphologicalPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkImagingMorphologicalTCL-6.3.so.6.3 -> libvtkImagingMorphologicalTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 143K Dec 17 2017 libvtkImagingMorphologicalTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkImagingSources-6.3.so.6.3 -> libvtkImagingSources-6.3.so.6.3.0 -rw-r--r-- 1 root root 331K Dec 17 2017 libvtkImagingSources-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkImagingSourcesPython27D-6.3.so.6.3 -> libvtkImagingSourcesPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 253K Dec 17 2017 libvtkImagingSourcesPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkImagingSourcesTCL-6.3.so.6.3 -> libvtkImagingSourcesTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 183K Dec 17 2017 libvtkImagingSourcesTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkImagingStatistics-6.3.so.6.3 -> libvtkImagingStatistics-6.3.so.6.3.0 -rw-r--r-- 1 root root 156K Dec 17 2017 libvtkImagingStatistics-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkImagingStatisticsPython27D-6.3.so.6.3 -> libvtkImagingStatisticsPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 138K Dec 17 2017 libvtkImagingStatisticsPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkImagingStatisticsTCL-6.3.so.6.3 -> libvtkImagingStatisticsTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 99K Dec 17 2017 libvtkImagingStatisticsTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkImagingStencil-6.3.so.6.3 -> libvtkImagingStencil-6.3.so.6.3.0 -rw-r--r-- 1 root root 212K Dec 17 2017 libvtkImagingStencil-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkImagingStencilPython27D-6.3.so.6.3 -> libvtkImagingStencilPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 147K Dec 17 2017 libvtkImagingStencilPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkImagingStencilTCL-6.3.so.6.3 -> libvtkImagingStencilTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 107K Dec 17 2017 libvtkImagingStencilTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkInfovisBoostGraphAlgorithms-6.3.so.6.3 -> libvtkInfovisBoostGraphAlgorithms-6.3.so.6.3.0 -rw-r--r-- 1 root root 477K Dec 17 2017 libvtkInfovisBoostGraphAlgorithms-6.3.so.6.3.0 lrwxrwxrwx 1 root root 55 Dec 17 2017 libvtkInfovisBoostGraphAlgorithmsPython27D-6.3.so.6.3 -> libvtkInfovisBoostGraphAlgorithmsPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 205K Dec 17 2017 libvtkInfovisBoostGraphAlgorithmsPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 49 
Dec 17 2017 libvtkInfovisBoostGraphAlgorithmsTCL-6.3.so.6.3 -> libvtkInfovisBoostGraphAlgorithmsTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 163K Dec 17 2017 libvtkInfovisBoostGraphAlgorithmsTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkInfovisCore-6.3.so.6.3 -> libvtkInfovisCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 1008K Dec 17 2017 libvtkInfovisCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkInfovisCorePython27D-6.3.so.6.3 -> libvtkInfovisCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 752K Dec 17 2017 libvtkInfovisCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkInfovisCoreTCL-6.3.so.6.3 -> libvtkInfovisCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 536K Dec 17 2017 libvtkInfovisCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkInfovisLayout-6.3.so.6.3 -> libvtkInfovisLayout-6.3.so.6.3.0 -rw-r--r-- 1 root root 670K Dec 17 2017 libvtkInfovisLayout-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkInfovisLayoutPython27D-6.3.so.6.3 -> libvtkInfovisLayoutPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 897K Dec 17 2017 libvtkInfovisLayoutPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkInfovisLayoutTCL-6.3.so.6.3 -> libvtkInfovisLayoutTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 600K Dec 17 2017 libvtkInfovisLayoutTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkInteractionImage-6.3.so.6.3 -> libvtkInteractionImage-6.3.so.6.3.0 -rw-r--r-- 1 root root 152K Dec 17 2017 libvtkInteractionImage-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkInteractionImagePython27D-6.3.so.6.3 -> libvtkInteractionImagePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 144K Dec 17 2017 libvtkInteractionImagePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkInteractionImageTCL-6.3.so.6.3 -> libvtkInteractionImageTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 91K Dec 17 2017 libvtkInteractionImageTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkInteractionStyle-6.3.so.6.3 -> libvtkInteractionStyle-6.3.so.6.3.0 -rw-r--r-- 1 root root 537K Dec 17 2017 libvtkInteractionStyle-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkInteractionStylePython27D-6.3.so.6.3 -> libvtkInteractionStylePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 349K Dec 17 2017 libvtkInteractionStylePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkInteractionStyleTCL-6.3.so.6.3 -> libvtkInteractionStyleTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 219K Dec 17 2017 libvtkInteractionStyleTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkInteractionWidgets-6.3.so.6.3 -> libvtkInteractionWidgets-6.3.so.6.3.0 -rw-r--r-- 1 root root 3.2M Dec 17 2017 libvtkInteractionWidgets-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkInteractionWidgetsPython27D-6.3.so.6.3 -> libvtkInteractionWidgetsPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 3.6M Dec 17 2017 libvtkInteractionWidgetsPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkInteractionWidgetsTCL-6.3.so.6.3 -> libvtkInteractionWidgetsTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 2.1M Dec 17 2017 libvtkInteractionWidgetsTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkLocalExample-6.3.so.6.3 -> libvtkLocalExample-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkLocalExample-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkLocalExampleTCL-6.3.so.6.3 -> libvtkLocalExampleTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkLocalExampleTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 
libvtkParallelCore-6.3.so.6.3 -> libvtkParallelCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 305K Dec 17 2017 libvtkParallelCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkParallelCorePython27D-6.3.so.6.3 -> libvtkParallelCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 376K Dec 17 2017 libvtkParallelCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkParallelCoreTCL-6.3.so.6.3 -> libvtkParallelCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 152K Dec 17 2017 libvtkParallelCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkParallelMPI-6.3.so.6.3 -> libvtkParallelMPI-6.3.so.6.3.0 -rw-r--r-- 1 root root 149K Dec 17 2017 libvtkParallelMPI-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkParallelMPI4Py-6.3.so.6.3 -> libvtkParallelMPI4Py-6.3.so.6.3.0 -rw-r--r-- 1 root root 68K Dec 17 2017 libvtkParallelMPI4Py-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkParallelMPI4PyPython27D-6.3.so.6.3 -> libvtkParallelMPI4PyPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkParallelMPI4PyPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkParallelMPIPython27D-6.3.so.6.3 -> libvtkParallelMPIPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 73K Dec 17 2017 libvtkParallelMPIPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkParallelMPITCL-6.3.so.6.3 -> libvtkParallelMPITCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 35K Dec 17 2017 libvtkParallelMPITCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkPythonInterpreter-6.3.so.6.3 -> libvtkPythonInterpreter-6.3.so.6.3.0 -rw-r--r-- 1 root root 40K Dec 17 2017 libvtkPythonInterpreter-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkPythonInterpreterPython27D-6.3.so.6.3 -> libvtkPythonInterpreterPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 28K Dec 17 2017 libvtkPythonInterpreterPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkPythonInterpreterTCL-6.3.so.6.3 -> libvtkPythonInterpreterTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 27K Dec 17 2017 libvtkPythonInterpreterTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkRenderingAnnotation-6.3.so.6.3 -> libvtkRenderingAnnotation-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.3M Dec 17 2017 libvtkRenderingAnnotation-6.3.so.6.3.0 lrwxrwxrwx 1 root root 47 Dec 17 2017 libvtkRenderingAnnotationPython27D-6.3.so.6.3 -> libvtkRenderingAnnotationPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.9M Dec 17 2017 libvtkRenderingAnnotationPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkRenderingAnnotationTCL-6.3.so.6.3 -> libvtkRenderingAnnotationTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 945K Dec 17 2017 libvtkRenderingAnnotationTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkRenderingContext2D-6.3.so.6.3 -> libvtkRenderingContext2D-6.3.so.6.3.0 -rw-r--r-- 1 root root 252K Dec 17 2017 libvtkRenderingContext2D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkRenderingContext2DPython27D-6.3.so.6.3 -> libvtkRenderingContext2DPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 320K Dec 17 2017 libvtkRenderingContext2DPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkRenderingContextIIDTCL-6.3.so.6.3 -> libvtkRenderingContextIIDTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 212K Dec 17 2017 libvtkRenderingContextIIDTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkRenderingContextOpenGL-6.3.so.6.3 -> libvtkRenderingContextOpenGL-6.3.so.6.3.0 -rw-r--r-- 1 root root 196K Dec 17 2017 libvtkRenderingContextOpenGL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 50 Dec 17 2017 
libvtkRenderingContextOpenGLPython27D-6.3.so.6.3 -> libvtkRenderingContextOpenGLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkRenderingContextOpenGLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkRenderingContextOpenGLTCL-6.3.so.6.3 -> libvtkRenderingContextOpenGLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkRenderingContextOpenGLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 32 Dec 17 2017 libvtkRenderingCore-6.3.so.6.3 -> libvtkRenderingCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 2.5M Dec 17 2017 libvtkRenderingCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkRenderingCorePython27D-6.3.so.6.3 -> libvtkRenderingCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 3.6M Dec 17 2017 libvtkRenderingCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkRenderingCoreTCL-6.3.so.6.3 -> libvtkRenderingCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 2.2M Dec 17 2017 libvtkRenderingCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkRenderingExternal-6.3.so.6.3 -> libvtkRenderingExternal-6.3.so.6.3.0 -rw-r--r-- 1 root root 236K Dec 17 2017 libvtkRenderingExternal-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkRenderingExternalPython27D-6.3.so.6.3 -> libvtkRenderingExternalPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 86K Dec 17 2017 libvtkRenderingExternalPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkRenderingExternalTCL-6.3.so.6.3 -> libvtkRenderingExternalTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 71K Dec 17 2017 libvtkRenderingExternalTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkRenderingFreeType-6.3.so.6.3 -> libvtkRenderingFreeType-6.3.so.6.3.0 -rw-r--r-- 1 root root 781K Dec 17 2017 libvtkRenderingFreeType-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkRenderingFreeTypeFontConfig-6.3.so.6.3 -> libvtkRenderingFreeTypeFontConfig-6.3.so.6.3.0 -rw-r--r-- 1 root root 39K Dec 17 2017 libvtkRenderingFreeTypeFontConfig-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkRenderingFreeTypePython27D-6.3.so.6.3 -> libvtkRenderingFreeTypePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 61K Dec 17 2017 libvtkRenderingFreeTypePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkRenderingFreeTypeTCL-6.3.so.6.3 -> libvtkRenderingFreeTypeTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 47K Dec 17 2017 libvtkRenderingFreeTypeTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkRenderingGL2PS-6.3.so.6.3 -> libvtkRenderingGL2PS-6.3.so.6.3.0 -rw-r--r-- 1 root root 112K Dec 17 2017 libvtkRenderingGL2PS-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkRenderingGL2PSPython27D-6.3.so.6.3 -> libvtkRenderingGL2PSPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 23K Dec 17 2017 libvtkRenderingGL2PSPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkRenderingGLtoPSTCL-6.3.so.6.3 -> libvtkRenderingGLtoPSTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 19K Dec 17 2017 libvtkRenderingGLtoPSTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkRenderingImage-6.3.so.6.3 -> libvtkRenderingImage-6.3.so.6.3.0 -rw-r--r-- 1 root root 136K Dec 17 2017 libvtkRenderingImage-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkRenderingImagePython27D-6.3.so.6.3 -> libvtkRenderingImagePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 110K Dec 17 2017 libvtkRenderingImagePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkRenderingImageTCL-6.3.so.6.3 -> libvtkRenderingImageTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 63K Dec 17 2017 
libvtkRenderingImageTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkRenderingLIC-6.3.so.6.3 -> libvtkRenderingLIC-6.3.so.6.3.0 -rw-r--r-- 1 root root 647K Dec 17 2017 libvtkRenderingLIC-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkRenderingLICPython27D-6.3.so.6.3 -> libvtkRenderingLICPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 164K Dec 17 2017 libvtkRenderingLICPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkRenderingLICTCL-6.3.so.6.3 -> libvtkRenderingLICTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 99K Dec 17 2017 libvtkRenderingLICTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkRenderingLOD-6.3.so.6.3 -> libvtkRenderingLOD-6.3.so.6.3.0 -rw-r--r-- 1 root root 99K Dec 17 2017 libvtkRenderingLOD-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkRenderingLODPython27D-6.3.so.6.3 -> libvtkRenderingLODPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 106K Dec 17 2017 libvtkRenderingLODPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkRenderingLODTCL-6.3.so.6.3 -> libvtkRenderingLODTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 59K Dec 17 2017 libvtkRenderingLODTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkRenderingLabel-6.3.so.6.3 -> libvtkRenderingLabel-6.3.so.6.3.0 -rw-r--r-- 1 root root 689K Dec 17 2017 libvtkRenderingLabel-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkRenderingLabelPython27D-6.3.so.6.3 -> libvtkRenderingLabelPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 434K Dec 17 2017 libvtkRenderingLabelPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkRenderingLabelTCL-6.3.so.6.3 -> libvtkRenderingLabelTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 239K Dec 17 2017 libvtkRenderingLabelTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkRenderingMatplotlib-6.3.so.6.3 -> libvtkRenderingMatplotlib-6.3.so.6.3.0 -rw-r--r-- 1 root root 67K Dec 17 2017 libvtkRenderingMatplotlib-6.3.so.6.3.0 lrwxrwxrwx 1 root root 47 Dec 17 2017 libvtkRenderingMatplotlibPython27D-6.3.so.6.3 -> libvtkRenderingMatplotlibPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 23K Dec 17 2017 libvtkRenderingMatplotlibPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 41 Dec 17 2017 libvtkRenderingMatplotlibTCL-6.3.so.6.3 -> libvtkRenderingMatplotlibTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 23K Dec 17 2017 libvtkRenderingMatplotlibTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkRenderingOpenGL-6.3.so.6.3 -> libvtkRenderingOpenGL-6.3.so.6.3.0 -rw-r--r-- 1 root root 2.7M Dec 17 2017 libvtkRenderingOpenGL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 43 Dec 17 2017 libvtkRenderingOpenGLPython27D-6.3.so.6.3 -> libvtkRenderingOpenGLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 983K Dec 17 2017 libvtkRenderingOpenGLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkRenderingOpenGLTCL-6.3.so.6.3 -> libvtkRenderingOpenGLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 711K Dec 17 2017 libvtkRenderingOpenGLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 36 Dec 17 2017 libvtkRenderingParallel-6.3.so.6.3 -> libvtkRenderingParallel-6.3.so.6.3.0 -rw-r--r-- 1 root root 341K Dec 17 2017 libvtkRenderingParallel-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkRenderingParallelLIC-6.3.so.6.3 -> libvtkRenderingParallelLIC-6.3.so.6.3.0 -rw-r--r-- 1 root root 621K Dec 17 2017 libvtkRenderingParallelLIC-6.3.so.6.3.0 lrwxrwxrwx 1 root root 48 Dec 17 2017 libvtkRenderingParallelLICPython27D-6.3.so.6.3 -> libvtkRenderingParallelLICPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 48K Dec 17 2017 
libvtkRenderingParallelLICPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkRenderingParallelLICTCL-6.3.so.6.3 -> libvtkRenderingParallelLICTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 39K Dec 17 2017 libvtkRenderingParallelLICTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 45 Dec 17 2017 libvtkRenderingParallelPython27D-6.3.so.6.3 -> libvtkRenderingParallelPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 284K Dec 17 2017 libvtkRenderingParallelPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkRenderingParallelTCL-6.3.so.6.3 -> libvtkRenderingParallelTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 179K Dec 17 2017 libvtkRenderingParallelTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkRenderingTkTCL-6.3.so.6.3 -> libvtkRenderingTkTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 104K Dec 17 2017 libvtkRenderingTkTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkRenderingVolume-6.3.so.6.3 -> libvtkRenderingVolume-6.3.so.6.3.0 -rw-r--r-- 1 root root 4.2M Dec 17 2017 libvtkRenderingVolume-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkRenderingVolumeAMR-6.3.so.6.3 -> libvtkRenderingVolumeAMR-6.3.so.6.3.0 -rw-r--r-- 1 root root 87K Dec 17 2017 libvtkRenderingVolumeAMR-6.3.so.6.3.0 lrwxrwxrwx 1 root root 46 Dec 17 2017 libvtkRenderingVolumeAMRPython27D-6.3.so.6.3 -> libvtkRenderingVolumeAMRPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 69K Dec 17 2017 libvtkRenderingVolumeAMRPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkRenderingVolumeAMRTCL-6.3.so.6.3 -> libvtkRenderingVolumeAMRTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 39K Dec 17 2017 libvtkRenderingVolumeAMRTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkRenderingVolumeOpenGL-6.3.so.6.3 -> libvtkRenderingVolumeOpenGL-6.3.so.6.3.0 -rw-r--r-- 1 root root 670K Dec 17 2017 libvtkRenderingVolumeOpenGL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 49 Dec 17 2017 libvtkRenderingVolumeOpenGLPython27D-6.3.so.6.3 -> libvtkRenderingVolumeOpenGLPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 127K Dec 17 2017 libvtkRenderingVolumeOpenGLPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 43 Dec 17 2017 libvtkRenderingVolumeOpenGLTCL-6.3.so.6.3 -> libvtkRenderingVolumeOpenGLTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 91K Dec 17 2017 libvtkRenderingVolumeOpenGLTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 43 Dec 17 2017 libvtkRenderingVolumePython27D-6.3.so.6.3 -> libvtkRenderingVolumePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.1M Dec 17 2017 libvtkRenderingVolumePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkRenderingVolumeTCL-6.3.so.6.3 -> libvtkRenderingVolumeTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 620K Dec 17 2017 libvtkRenderingVolumeTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkTestingGenericBridge-6.3.so.6.3 -> libvtkTestingGenericBridge-6.3.so.6.3.0 -rw-r--r-- 1 root root 132K Dec 17 2017 libvtkTestingGenericBridge-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkTestingIOSQL-6.3.so.6.3 -> libvtkTestingIOSQL-6.3.so.6.3.0 -rw-r--r-- 1 root root 15K Dec 17 2017 libvtkTestingIOSQL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 35 Dec 17 2017 libvtkTestingRendering-6.3.so.6.3 -> libvtkTestingRendering-6.3.so.6.3.0 -rw-r--r-- 1 root root 136K Dec 17 2017 libvtkTestingRendering-6.3.so.6.3.0 lrwxrwxrwx 1 root root 44 Dec 17 2017 libvtkTestingRenderingPython27D-6.3.so.6.3 -> libvtkTestingRenderingPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 61K Dec 17 2017 libvtkTestingRenderingPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 38 Dec 17 2017 libvtkTestingRenderingTCL-6.3.so.6.3 -> 
libvtkTestingRenderingTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 43K Dec 17 2017 libvtkTestingRenderingTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 23 Dec 17 2017 libvtkVPIC-6.3.so.6.3 -> libvtkVPIC-6.3.so.6.3.0 -rw-r--r-- 1 root root 95K Dec 17 2017 libvtkVPIC-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkViewsContext2D-6.3.so.6.3 -> libvtkViewsContext2D-6.3.so.6.3.0 -rw-r--r-- 1 root root 75K Dec 17 2017 libvtkViewsContext2D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 42 Dec 17 2017 libvtkViewsContext2DPython27D-6.3.so.6.3 -> libvtkViewsContext2DPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 32K Dec 17 2017 libvtkViewsContext2DPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkViewsContextIIDTCL-6.3.so.6.3 -> libvtkViewsContextIIDTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 27K Dec 17 2017 libvtkViewsContextIIDTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 28 Dec 17 2017 libvtkViewsCore-6.3.so.6.3 -> libvtkViewsCore-6.3.so.6.3.0 -rw-r--r-- 1 root root 172K Dec 17 2017 libvtkViewsCore-6.3.so.6.3.0 lrwxrwxrwx 1 root root 37 Dec 17 2017 libvtkViewsCorePython27D-6.3.so.6.3 -> libvtkViewsCorePython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 181K Dec 17 2017 libvtkViewsCorePython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkViewsCoreTCL-6.3.so.6.3 -> libvtkViewsCoreTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 119K Dec 17 2017 libvtkViewsCoreTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 30 Dec 17 2017 libvtkViewsGeovis-6.3.so.6.3 -> libvtkViewsGeovis-6.3.so.6.3.0 -rw-r--r-- 1 root root 71K Dec 17 2017 libvtkViewsGeovis-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkViewsGeovisPython27D-6.3.so.6.3 -> libvtkViewsGeovisPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 32K Dec 17 2017 libvtkViewsGeovisPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 33 Dec 17 2017 libvtkViewsGeovisTCL-6.3.so.6.3 -> libvtkViewsGeovisTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 27K Dec 17 2017 libvtkViewsGeovisTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkViewsInfovis-6.3.so.6.3 -> libvtkViewsInfovis-6.3.so.6.3.0 -rw-r--r-- 1 root root 1.1M Dec 17 2017 libvtkViewsInfovis-6.3.so.6.3.0 lrwxrwxrwx 1 root root 40 Dec 17 2017 libvtkViewsInfovisPython27D-6.3.so.6.3 -> libvtkViewsInfovisPython27D-6.3.so.6.3.0 -rw-r--r-- 1 root root 927K Dec 17 2017 libvtkViewsInfovisPython27D-6.3.so.6.3.0 lrwxrwxrwx 1 root root 34 Dec 17 2017 libvtkViewsInfovisTCL-6.3.so.6.3 -> libvtkViewsInfovisTCL-6.3.so.6.3.0 -rw-r--r-- 1 root root 565K Dec 17 2017 libvtkViewsInfovisTCL-6.3.so.6.3.0 lrwxrwxrwx 1 root root 31 Dec 17 2017 libvtkWrappingJava-6.3.so.6.3 -> libvtkWrappingJava-6.3.so.6.3.0 -rw-r--r-- 1 root root 19K Dec 17 2017 libvtkWrappingJava-6.3.so.6.3.0 lrwxrwxrwx 1 root root 39 Dec 17 2017 libvtkWrappingPython27Core-6.3.so.6.3 -> libvtkWrappingPython27Core-6.3.so.6.3.0 -rw-r--r-- 1 root root 180K Dec 17 2017 libvtkWrappingPython27Core-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkalglib-6.3.so.6.3 -> libvtkalglib-6.3.so.6.3.0 -rw-r--r-- 1 root root 139K Dec 17 2017 libvtkalglib-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkexoIIc-6.3.so.6.3 -> libvtkexoIIc-6.3.so.6.3.0 -rw-r--r-- 1 root root 288K Dec 17 2017 libvtkexoIIc-6.3.so.6.3.0 lrwxrwxrwx 1 root root 23 Dec 17 2017 libvtkftgl-6.3.so.6.3 -> libvtkftgl-6.3.so.6.3.0 -rw-r--r-- 1 root root 43K Dec 17 2017 libvtkftgl-6.3.so.6.3.0 lrwxrwxrwx 1 root root 25 Dec 17 2017 libvtkmetaio-6.3.so.6.3 -> libvtkmetaio-6.3.so.6.3.0 -rw-r--r-- 1 root root 591K Dec 17 2017 libvtkmetaio-6.3.so.6.3.0 lrwxrwxrwx 1 root root 22 Dec 17 2017 libvtksys-6.3.so.6.3 -> 
libvtksys-6.3.so.6.3.0 -rw-r--r-- 1 root root 278K Dec 17 2017 libvtksys-6.3.so.6.3.0 lrwxrwxrwx 1 root root 26 Dec 17 2017 libvtkverdict-6.3.so.6.3 -> libvtkverdict-6.3.so.6.3.0 -rw-r--r-- 1 root root 195K Dec 17 2017 libvtkverdict-6.3.so.6.3.0 lrwxrwxrwx 1 root root 24 Dec 17 2017 libvtkxdmf2-6.3.so.6.3 -> libvtkxdmf2-6.3.so.6.3.0 -rw-r--r-- 1 root root 472K Dec 17 2017 libvtkxdmf2-6.3.so.6.3.0 lrwxrwxrwx 1 root root 19 Jan 5 2021 libwavpack.so.1 -> libwavpack.so.1.2.0 -rw-r--r-- 1 root root 167K Jan 5 2021 libwavpack.so.1.2.0 lrwxrwxrwx 1 root root 26 Feb 1 2020 libwayland-client.so.0 -> libwayland-client.so.0.3.0 -rw-r--r-- 1 root root 60K Feb 1 2020 libwayland-client.so.0.3.0 lrwxrwxrwx 1 root root 26 Feb 1 2020 libwayland-cursor.so.0 -> libwayland-cursor.so.0.0.0 -rw-r--r-- 1 root root 31K Feb 1 2020 libwayland-cursor.so.0.0.0 lrwxrwxrwx 1 root root 23 Feb 1 2020 libwayland-egl.so.1 -> libwayland-egl.so.1.0.0 -rw-r--r-- 1 root root 5.9K Feb 1 2020 libwayland-egl.so.1.0.0 lrwxrwxrwx 1 root root 16 May 20 2021 libwebp.so.6 -> libwebp.so.6.0.2 -rw-r--r-- 1 root root 411K May 20 2021 libwebp.so.6.0.2 lrwxrwxrwx 1 root root 19 May 20 2021 libwebpmux.so.3 -> libwebpmux.so.3.0.1 -rw-r--r-- 1 root root 39K May 20 2021 libwebpmux.so.3.0.1 lrwxrwxrwx 1 root root 16 Dec 15 2017 libwind.so.0 -> libwind.so.0.0.0 -rw-r--r-- 1 root root 162K Dec 15 2017 libwind.so.0.0.0 -rw-r--r-- 1 root root 1.2M Jan 19 2018 libx264.so.152 -rw-r--r-- 1 root root 11M Dec 31 2017 libx265.so.146 lrwxrwxrwx 1 root root 20 Jun 21 2018 libxcb-dri2.so.0 -> libxcb-dri2.so.0.0.0 -rw-r--r-- 1 root root 19K Jun 21 2018 libxcb-dri2.so.0.0.0 lrwxrwxrwx 1 root root 20 Jun 21 2018 libxcb-dri3.so.0 -> libxcb-dri3.so.0.0.0 -rw-r--r-- 1 root root 15K Jun 21 2018 libxcb-dri3.so.0.0.0 lrwxrwxrwx 1 root root 19 Jun 21 2018 libxcb-glx.so.0 -> libxcb-glx.so.0.0.0 -rw-r--r-- 1 root root 107K Jun 21 2018 libxcb-glx.so.0.0.0 lrwxrwxrwx 1 root root 23 Jun 21 2018 libxcb-present.so.0 -> libxcb-present.so.0.0.0 -rw-r--r-- 1 root root 11K Jun 21 2018 libxcb-present.so.0.0.0 lrwxrwxrwx 1 root root 22 Jun 21 2018 libxcb-render.so.0 -> libxcb-render.so.0.0.0 -rw-r--r-- 1 root root 51K Jun 21 2018 libxcb-render.so.0.0.0 lrwxrwxrwx 1 root root 19 Jun 21 2018 libxcb-shm.so.0 -> libxcb-shm.so.0.0.0 -rw-r--r-- 1 root root 11K Jun 21 2018 libxcb-shm.so.0.0.0 lrwxrwxrwx 1 root root 20 Jun 21 2018 libxcb-sync.so.1 -> libxcb-sync.so.1.0.0 -rw-r--r-- 1 root root 27K Jun 21 2018 libxcb-sync.so.1.0.0 lrwxrwxrwx 1 root root 15 Jun 21 2018 libxcb.so.1 -> libxcb.so.1.1.0 -rw-r--r-- 1 root root 159K Jun 21 2018 libxcb.so.1.1.0 lrwxrwxrwx 1 root root 16 Mar 24 2018 libxdot.so -> libxdot.so.4.0.0 lrwxrwxrwx 1 root root 16 Mar 24 2018 libxdot.so.4 -> libxdot.so.4.0.0 -rw-r--r-- 1 root root 23K Mar 24 2018 libxdot.so.4.0.0 -rw-r--r-- 1 root root 3.5M Nov 10 2017 libxerces-c-3.2.so lrwxrwxrwx 1 root root 21 Mar 5 2019 libxkbcommon.so.0 -> libxkbcommon.so.0.0.0 -rw-r--r-- 1 root root 251K Mar 5 2019 libxkbcommon.so.0.0.0 -rw-r--r-- 1 root root 2.9M Aug 1 14:25 libxml2.a lrwxrwxrwx 1 root root 16 Aug 1 14:25 libxml2.so -> libxml2.so.2.9.4 lrwxrwxrwx 1 root root 16 Aug 1 14:25 libxml2.so.2 -> libxml2.so.2.9.4 -rw-r--r-- 1 root root 1.8M Aug 1 14:25 libxml2.so.2.9.4 lrwxrwxrwx 1 root root 21 Mar 18 2018 libxshmfence.so.1 -> libxshmfence.so.1.0.0 -rw-r--r-- 1 root root 6.1K Mar 18 2018 libxshmfence.so.1.0.0 lrwxrwxrwx 1 root root 18 Jan 28 2018 libxvidcore.so.4 -> libxvidcore.so.4.3 -rw-r--r-- 1 root root 672K Jan 28 2018 libxvidcore.so.4.3 -rw-r--r-- 1 root root 162K Mar 
[Pipeline] echo
Packing build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so into dgl-gpu-linux
[Pipeline] stash
Stashed 4 file(s)
Post stage
[Pipeline] cleanWs
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is disabled by the job configuration...
[WS-CLEANUP] done
[Pipeline] }
$ docker stop --time=1 681e11859cd167f00c1889ac9c7c8c96d552be64f67c250e1cf0b5bc1d83bafe
$ docker rm -f 681e11859cd167f00c1889ac9c7c8c96d552be64f67c250e1cf0b5bc1d83bafe
[Pipeline] // withDockerContainer
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_cumsum.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/array.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/array_arith.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_index_select.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_pack.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_nonzero.cc.o
[ 61%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_op_impl.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_mm.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_sort.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_remove.cc.o
[ 62%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_sort.cc.o
[ 63%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_repeat.cc.o
[ 63%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_scatter.cc.o
[ 63%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_coalesce.cc.o
[ 63%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/array_sort.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_linegraph.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/sddmm.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/rowwise_topk.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/coo_remove.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/segment_reduce.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_to_simple.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/gather_mm.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_get_data.cc.o
[ 65%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_sum.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/csr_union.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/negative_sampling.cc.o
[ 66%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/disjoint_union.cc.o
[ 67%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/rowwise_sampling.cc.o
[ 67%] Building CXX object CMakeFiles/dgl.dir/src/array/filter.cc.o
[ 68%] Building CXX object CMakeFiles/dgl.dir/src/array/kernel.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/spmat_op_impl_csr.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/spmat_op_impl_coo.cc.o
[ 68%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/spmm.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/array/union_partition.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/array/cpu/traversal.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/array/uvm_array.cc.o
[ 69%] Building CXX object CMakeFiles/dgl.dir/src/geometry/geometry.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/bcast.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/array/libra_partition.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/c_api_common.cc.o
[ 71%] Building CXX object CMakeFiles/dgl.dir/src/geometry/cpu/geometry_op_impl.cc.o
[ 72%] Building CXX object CMakeFiles/dgl.dir/src/partition/ndarray_partition.cc.o
[ 72%] Building CXX object CMakeFiles/dgl.dir/src/random/cpu/choice.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/runtime/cpu_device_api.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/runtime/c_runtime_api.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/runtime/config.cc.o
[ 73%] Building CXX object CMakeFiles/dgl.dir/src/runtime/dlpack_convert.cc.o
[ 74%] Building CXX object CMakeFiles/dgl.dir/src/runtime/file_util.cc.o
[ 74%] Building CXX object CMakeFiles/dgl.dir/src/runtime/module.cc.o
[ 74%] Building CXX object CMakeFiles/dgl.dir/src/runtime/module_util.cc.o
[ 74%] Building CXX object CMakeFiles/dgl.dir/src/random/random.cc.o
[ 74%] Building CXX object CMakeFiles/dgl.dir/src/runtime/semaphore_wrapper.cc.o
[ 74%] Building CXX object CMakeFiles/dgl.dir/src/runtime/c_object_api.cc.o
[ 74%] Building CXX object CMakeFiles/dgl.dir/src/runtime/ndarray.cc.o
[ 75%] Building CXX object CMakeFiles/dgl.dir/src/runtime/object.cc.o
[ 75%] Building CXX object CMakeFiles/dgl.dir/src/runtime/dso_module.cc.o
[ 75%] Building CXX object CMakeFiles/dgl.dir/src/runtime/system_lib_module.cc.o
[ 75%] Building CXX object CMakeFiles/dgl.dir/src/runtime/resource_manager.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/runtime/shared_mem.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/runtime/thread_pool.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/runtime/tensordispatch.cc.o
[ 77%] Building CXX object CMakeFiles/dgl.dir/src/runtime/threading_backend.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/runtime/utils.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/runtime/workspace_pool.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/runtime/registry.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/get_node_types_cpu.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/node2vec_cpu.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/heterograph_serialize.cc.o
[ 78%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/randomwalk_cpu.cc.o
[ 79%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/randomwalk_with_restart_cpu.cc.o
[ 79%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/dglgraph_serialize.cc.o
[ 79%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/randomwalks.cc.o
[ 80%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampler.cc.o
[ 81%] Building CXX object CMakeFiles/dgl.dir/src/graph/subgraph.cc.o
[ 81%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/zerocopy_serializer.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/randomwalks/node2vec.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/neighbor/neighbor.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/knn.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/metis_partition_hetero.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/graph_serialize.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/serialize/tensor_serialize.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/shared_mem_manager.cc.o
[ 83%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/line_graph.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/cpu/knn.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/to_bipartite.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/api/api_container.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/to_simple.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/scheduler/scheduler_apis.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/scheduler/scheduler.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/remove_edges.cc.o
[ 84%] Building CXX object CMakeFiles/dgl.dir/src/graph/unit_graph.cc.o
[ 85%] Building CXX object CMakeFiles/dgl.dir/src/graph/metis_partition.cc.o
[ 85%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/union_partition.cc.o
[ 85%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/msg_queue.cc.o
[ 85%] Building CXX object CMakeFiles/dgl.dir/src/graph/nodeflow.cc.o
[ 85%] Building CXX object CMakeFiles/dgl.dir/src/graph/gk_ops.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/partition_hetero.cc.o
[ 86%] Building CXX object CMakeFiles/dgl.dir/src/graph/creators.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/socket_pool.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/api/api_test.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/heterograph_capi.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph_traversal.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/heterograph.cc.o
[ 87%] Building CXX object CMakeFiles/dgl.dir/src/graph/immutable_graph.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph_op.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph_apis.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/graph/sampling/negative/global_uniform.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/graph/pickle.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/graph/network.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/graph/graph.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/graph/transform/compact.cc.o
[ 89%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/socket_communicator.cc.o
[ 90%] Building CXX object CMakeFiles/dgl.dir/src/graph/traversal.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/common.cc.o
[ 91%] Building CXX object CMakeFiles/dgl.dir/src/rpc/rpc.cc.o
[ 92%] Building CXX object CMakeFiles/dgl.dir/src/rpc/network/tcp_socket.cc.o
[ 92%] Building CXX object CMakeFiles/dgl.dir/src/array/cuda/csr_transpose.cc.o
[ 92%] Building CXX object CMakeFiles/dgl.dir/src/rpc/tensorpipe/tp_communicator.cc.o
[ 92%] Building CXX object CMakeFiles/dgl.dir/src/runtime/cuda/cuda_device_api.cc.o
In file included from /root/jenkins/workspace/dgl_PR-4648@3/src/geometry/../c_api_common.h:10:0,
                 from /root/jenkins/workspace/dgl_PR-4648@3/src/geometry/geometry.cc:9:
/root/jenkins/workspace/dgl_PR-4648@3/include/dgl/runtime/packed_func.h:502:21: warning: inline function 'TObjectRef dgl::runtime::DGLArgValue::AsObjectRef() const [with TObjectRef = dgl::HeteroGraphRef]' used but never defined
   inline TObjectRef AsObjectRef() const;
                     ^~~~~~~~~~~
/root/jenkins/workspace/dgl_PR-4648@3/src/partition/ndarray_partition.cc: In member function 'virtual int64_t dgl::partition::RangePartition::PartSize(int) const':
/root/jenkins/workspace/dgl_PR-4648@3/src/partition/ndarray_partition.cc:202:3: warning: control reaches end of non-void function [-Wreturn-type]
   }
   ^
In file included from /root/jenkins/workspace/dgl_PR-4648@3/third_party/dmlc-core/include/dmlc/logging.h:132:0,
                 from /root/jenkins/workspace/dgl_PR-4648@3/include/dgl/./runtime/object.h:9,
                 from /root/jenkins/workspace/dgl_PR-4648@3/include/dgl/graph_interface.h:15,
                 from /root/jenkins/workspace/dgl_PR-4648@3/include/dgl/sampler.h:13,
                 from /root/jenkins/workspace/dgl_PR-4648@3/src/graph/sampler.cc:6:
/root/jenkins/workspace/dgl_PR-4648@3/src/graph/sampler.cc: In member function 'dgl::NegSubgraph dgl::{anonymous}::EdgeSamplerObject::genNegEdgeSubgraph(const dgl::Subgraph&, const string&, int64_t, bool, bool)':
/root/jenkins/workspace/dgl_PR-4648@3/src/graph/sampler.cc:1189:48: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
     assert(prev_neg_offset + neg_sample_size == neg_vids.size());
            ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~
/root/jenkins/workspace/dgl_PR-4648@3/src/graph/sampler.cc:1193:48: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
     assert(prev_neg_offset + neg_sample_size == neg_vids.size());
            ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~
/root/jenkins/workspace/dgl_PR-4648@3/src/array/libra_partition.cc: In function 'dgl::runtime::List dgl::aten::Libra2dglBuildDict(dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, dgl::runtime::NDArray, int32_t, int32_t, int64_t, const string&)':
/root/jenkins/workspace/dgl_PR-4648@3/src/array/libra_partition.cc:396:11: warning: ignoring return value of 'int fscanf(FILE*, const char*, ...)', declared with attribute warn_unused_result [-Wunused-result]
     fscanf(fp, "%ld,%ld,%f\n", &u, &v, &w);  // reading an edge - the src and dst global node IDs
     ~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/root/jenkins/workspace/dgl_PR-4648@3/src/array/libra_partition.cc: In function 'int32_t dgl::aten::Ver2partition(IdType, int64_t*, int32_t) [with IdType = long int]':
/root/jenkins/workspace/dgl_PR-4648@3/src/array/libra_partition.cc:43:1: warning: control reaches end of non-void function [-Wreturn-type]
 }
 ^
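Both [-Wreturn-type] hits above (RangePartition::PartSize and Ver2partition) and the [-Wunused-result] hit on fscanf follow well-known GCC patterns rather than signaling build breakage. A minimal sketch of the two patterns and their conventional fixes; this is hypothetical illustration code, not the actual DGL sources (Kind, PartSize, and ReadEdge are made-up names):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstdlib>

enum class Kind { kRemainder, kRange };

// -Wreturn-type: the switch handles every enum value, but GCC cannot prove
// it is exhaustive, so the closing brace is reachable without a return.
int64_t PartSize(Kind kind, int64_t size, int num_parts) {
  switch (kind) {
    case Kind::kRemainder: return size / num_parts;
    case Kind::kRange:     return size;
  }
  std::abort();  // explicit unreachable tail keeps -Wreturn-type quiet
}

// -Wunused-result: glibc declares fscanf warn_unused_result, so the parsed
// field count must be checked; doing so also catches malformed input lines.
bool ReadEdge(std::FILE* fp, long* u, long* v, float* w) {
  return std::fscanf(fp, "%ld,%ld,%f\n", u, v, w) == 3;
}
```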
[ 93%] Linking CXX shared library libdgl.so
[ 93%] Built target dgl
[ 95%] Building CXX object CMakeFiles/rpc_server.dir/tests/dist/cpp/rpc_server.cc.o
[ 95%] Building CXX object CMakeFiles/rpc_client.dir/tests/dist/cpp/rpc_client.cc.o
[ 95%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/graph_index_test.cc.o
[ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/socket_communicator_test.cc.o
[ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/string_test.cc.o
[ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_aten.cc.o
[ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_csrmm.cc.o
[ 96%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/message_queue_test.cc.o
[ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_partition.cc.o
[ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_rowwise.cc.o
[ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_smart_ptr_serialize.cc.o
[ 97%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_serialize.cc.o
[ 98%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_spmat_coo.cc.o
[ 98%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_spmm.cc.o
[100%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_zerocopy_serialize.cc.o
[100%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_sampler.cc.o
[100%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_unit_graph.cc.o
[100%] Building CXX object CMakeFiles/runUnitTests.dir/tests/cpp/test_spmat_csr.cc.o
/root/jenkins/workspace/dgl_PR-4648@3/tests/dist/cpp/rpc_client.cc: In member function 'void RPCClient::StartClient()':
/root/jenkins/workspace/dgl_PR-4648@3/tests/dist/cpp/rpc_client.cc:50:15: warning: unused variable 'num_machines' [-Wunused-variable]
     const int num_machines = ips_.size();
               ^~~~~~~~~~~~
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc: In instantiation of 'void _TestRemainder_MapToX() [with DGLDeviceType XPU = (DGLDeviceType)2; IdType = int]':
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:86:44:   required from here
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:66:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < global->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:74:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < act_local->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc: In instantiation of 'void _TestRemainder_MapToX() [with DGLDeviceType XPU = (DGLDeviceType)2; IdType = long int]':
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:87:44:   required from here
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:66:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < global->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:74:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < act_local->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc: In instantiation of 'void _TestRange_MapToX() [with DGLDeviceType XPU = (DGLDeviceType)2; IdType = int]':
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:195:40:   required from here
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:176:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < global->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:184:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < act_local->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc: In instantiation of 'void _TestRange_MapToX() [with DGLDeviceType XPU = (DGLDeviceType)2; IdType = long int]':
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:196:40:   required from here
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:176:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < global->shape[0]; ++i) {
/root/jenkins/workspace/dgl_PR-4648@3/tests/cpp/test_partition.cc:184:26: warning: comparison between signed and unsigned integer expressions [-Wsign-compare]
   for (size_t i = 0; i < act_local->shape[0]; ++i) {
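Every [-Wsign-compare] hit in test_partition.cc has the same shape: an unsigned size_t loop index compared against the signed int64_t extent shape[0]. A minimal sketch of the pattern and the usual fix; this is hypothetical illustration code, not the actual test source (VisitRows is a made-up name):

```cpp
#include <cstddef>
#include <cstdint>

// NDArray shape extents are signed (int64_t); looping with size_t makes the
// loop condition a signed/unsigned comparison, which -Wsign-compare flags.
void VisitRows(const int64_t* shape) {
  // Warns:  for (size_t i = 0; i < shape[0]; ++i) { ... }
  // Fix: use a signed index so both sides of the comparison match.
  for (int64_t i = 0; i < shape[0]; ++i) {
    (void)i;  // loop body elided in this sketch
  }
}
```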
[100%] Linking CXX executable rpc_client
[100%] Linking CXX executable rpc_server
[100%] Built target rpc_client
[100%] Built target rpc_server
[100%] Linking CXX executable runUnitTests
[100%] Built target runUnitTests
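One warning from the libdgl compile above deserves a closer look: glibc declares fscanf with warn_unused_result, so the libra_partition.cc call that drops its return value silently ignores EOF and parse failures. A minimal sketch of checking the return value (the count of converted fields); the file name and helper are hypothetical, matching only the "%ld,%ld,%f" record format shown in the log:

    #include <cstdio>

    // Reads one "src,dst,weight" record; false on EOF or malformed input.
    // Comparing fscanf's return value against the expected field count both
    // silences -Wunused-result and catches truncated files.
    static bool ReadEdge(std::FILE* fp, long* u, long* v, float* w) {
      return std::fscanf(fp, "%ld,%ld,%f\n", u, v, w) == 3;
    }

    int main() {
      std::FILE* fp = std::fopen("edges.csv", "r");
      if (fp == nullptr) return 1;
      long u = 0, v = 0;
      float w = 0.0f;
      while (ReadEdge(fp, &u, &v, &w)) {
        std::printf("%ld -> %ld (w=%f)\n", u, v, w);
      }
      std::fclose(fp);
      return 0;
    }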
~/jenkins/workspace/dgl_PR-4648@3 ~/jenkins/workspace/dgl_PR-4648@3/python ~/jenkins/workspace/dgl_PR-4648@3
WARNING: Skipping dgl as it is not installed.
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
WARNING: Cython is not installed, will compile without cython module
running install
/opt/conda/lib/python3.9/site-packages/setuptools/command/install.py:34: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools.
  warnings.warn(
/opt/conda/lib/python3.9/site-packages/setuptools/command/easy_install.py:144: EasyInstallDeprecationWarning: easy_install command is deprecated. Use build and pip and other standards-based tools.
  warnings.warn(
running bdist_egg
running egg_info
creating dgl.egg-info
writing dgl.egg-info/PKG-INFO
writing dependency_links to dgl.egg-info/dependency_links.txt
writing requirements to dgl.egg-info/requires.txt
writing top-level names to dgl.egg-info/top_level.txt
writing manifest file 'dgl.egg-info/SOURCES.txt'
reading manifest file 'dgl.egg-info/SOURCES.txt'
writing manifest file 'dgl.egg-info/SOURCES.txt'
installing library code to build/bdist.linux-x86_64/egg
running install_lib
running build_py
creating build
creating build/lib.linux-x86_64-cpython-39
creating build/lib.linux-x86_64-cpython-39/dgl
copying dgl/partition.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/core.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/subgraph.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/traversal.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/base.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/heterograph_index.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/sparse.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/global_config.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/_api_internal.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/heterograph.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/network.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/logging.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/graph_index.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/udf.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/ndarray.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/init.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/view.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/generators.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/convert.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/container.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/merge.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/readout.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/propagate.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/batch.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/frame.py -> build/lib.linux-x86_64-cpython-39/dgl copying dgl/random.py -> build/lib.linux-x86_64-cpython-39/dgl creating build/lib.linux-x86_64-cpython-39/dgl/mock_sparse copying dgl/mock_sparse/sp_matrix.py -> build/lib.linux-x86_64-cpython-39/dgl/mock_sparse copying dgl/mock_sparse/diag_matrix.py -> build/lib.linux-x86_64-cpython-39/dgl/mock_sparse copying dgl/mock_sparse/reduction.py -> build/lib.linux-x86_64-cpython-39/dgl/mock_sparse copying dgl/mock_sparse/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/mock_sparse copying dgl/mock_sparse/elementwise_op_sp.py -> build/lib.linux-x86_64-cpython-39/dgl/mock_sparse creating build/lib.linux-x86_64-cpython-39/dgl/geometry copying dgl/geometry/edge_coarsening.py -> build/lib.linux-x86_64-cpython-39/dgl/geometry copying dgl/geometry/fps.py -> build/lib.linux-x86_64-cpython-39/dgl/geometry copying dgl/geometry/capi.py -> build/lib.linux-x86_64-cpython-39/dgl/geometry copying dgl/geometry/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/geometry creating build/lib.linux-x86_64-cpython-39/dgl/function copying dgl/function/base.py -> build/lib.linux-x86_64-cpython-39/dgl/function copying dgl/function/message.py -> build/lib.linux-x86_64-cpython-39/dgl/function copying dgl/function/reducer.py ->
build/lib.linux-x86_64-cpython-39/dgl/function copying dgl/function/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/function creating build/lib.linux-x86_64-cpython-39/dgl/nn copying dgl/nn/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/nn creating build/lib.linux-x86_64-cpython-39/dgl/contrib copying dgl/contrib/dis_kvstore.py -> build/lib.linux-x86_64-cpython-39/dgl/contrib copying dgl/contrib/unified_tensor.py -> build/lib.linux-x86_64-cpython-39/dgl/contrib copying dgl/contrib/graph_store.py -> build/lib.linux-x86_64-cpython-39/dgl/contrib copying dgl/contrib/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/contrib creating build/lib.linux-x86_64-cpython-39/dgl/utils copying dgl/utils/checks.py -> build/lib.linux-x86_64-cpython-39/dgl/utils copying dgl/utils/exception.py -> build/lib.linux-x86_64-cpython-39/dgl/utils copying dgl/utils/pin_memory.py -> build/lib.linux-x86_64-cpython-39/dgl/utils copying dgl/utils/internal.py -> build/lib.linux-x86_64-cpython-39/dgl/utils copying dgl/utils/filter.py -> build/lib.linux-x86_64-cpython-39/dgl/utils copying dgl/utils/data.py -> build/lib.linux-x86_64-cpython-39/dgl/utils copying dgl/utils/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/utils copying dgl/utils/shared_mem.py -> build/lib.linux-x86_64-cpython-39/dgl/utils creating build/lib.linux-x86_64-cpython-39/dgl/multiprocessing copying dgl/multiprocessing/pytorch.py -> build/lib.linux-x86_64-cpython-39/dgl/multiprocessing copying dgl/multiprocessing/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/multiprocessing creating build/lib.linux-x86_64-cpython-39/dgl/optim copying dgl/optim/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/optim creating build/lib.linux-x86_64-cpython-39/dgl/dataloading copying dgl/dataloading/base.py -> build/lib.linux-x86_64-cpython-39/dgl/dataloading copying dgl/dataloading/dataloader.py -> build/lib.linux-x86_64-cpython-39/dgl/dataloading copying dgl/dataloading/graphsaint.py -> build/lib.linux-x86_64-cpython-39/dgl/dataloading copying dgl/dataloading/shadow.py -> build/lib.linux-x86_64-cpython-39/dgl/dataloading copying dgl/dataloading/negative_sampler.py -> build/lib.linux-x86_64-cpython-39/dgl/dataloading copying dgl/dataloading/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/dataloading copying dgl/dataloading/cluster_gcn.py -> build/lib.linux-x86_64-cpython-39/dgl/dataloading copying dgl/dataloading/neighbor_sampler.py -> build/lib.linux-x86_64-cpython-39/dgl/dataloading copying dgl/dataloading/dist_dataloader.py -> build/lib.linux-x86_64-cpython-39/dgl/dataloading creating build/lib.linux-x86_64-cpython-39/dgl/_dataloading copying dgl/_dataloading/neighbor.py -> build/lib.linux-x86_64-cpython-39/dgl/_dataloading copying dgl/_dataloading/dataloader.py -> build/lib.linux-x86_64-cpython-39/dgl/_dataloading copying dgl/_dataloading/shadow.py -> build/lib.linux-x86_64-cpython-39/dgl/_dataloading copying dgl/_dataloading/negative_sampler.py -> build/lib.linux-x86_64-cpython-39/dgl/_dataloading copying dgl/_dataloading/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/_dataloading copying dgl/_dataloading/cluster_gcn.py -> build/lib.linux-x86_64-cpython-39/dgl/_dataloading creating build/lib.linux-x86_64-cpython-39/dgl/backend copying dgl/backend/backend.py -> build/lib.linux-x86_64-cpython-39/dgl/backend copying dgl/backend/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/backend copying dgl/backend/set_default_backend.py -> build/lib.linux-x86_64-cpython-39/dgl/backend creating build/lib.linux-x86_64-cpython-39/dgl/ops 
copying dgl/ops/edge_softmax.py -> build/lib.linux-x86_64-cpython-39/dgl/ops copying dgl/ops/sddmm.py -> build/lib.linux-x86_64-cpython-39/dgl/ops copying dgl/ops/gather_mm.py -> build/lib.linux-x86_64-cpython-39/dgl/ops copying dgl/ops/segment.py -> build/lib.linux-x86_64-cpython-39/dgl/ops copying dgl/ops/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/ops copying dgl/ops/spmm.py -> build/lib.linux-x86_64-cpython-39/dgl/ops creating build/lib.linux-x86_64-cpython-39/dgl/cuda copying dgl/cuda/nccl.py -> build/lib.linux-x86_64-cpython-39/dgl/cuda copying dgl/cuda/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/cuda creating build/lib.linux-x86_64-cpython-39/dgl/storages copying dgl/storages/pytorch_tensor.py -> build/lib.linux-x86_64-cpython-39/dgl/storages copying dgl/storages/base.py -> build/lib.linux-x86_64-cpython-39/dgl/storages copying dgl/storages/tensor.py -> build/lib.linux-x86_64-cpython-39/dgl/storages copying dgl/storages/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/storages copying dgl/storages/numpy.py -> build/lib.linux-x86_64-cpython-39/dgl/storages creating build/lib.linux-x86_64-cpython-39/dgl/sampling copying dgl/sampling/neighbor.py -> build/lib.linux-x86_64-cpython-39/dgl/sampling copying dgl/sampling/pinsage.py -> build/lib.linux-x86_64-cpython-39/dgl/sampling copying dgl/sampling/negative.py -> build/lib.linux-x86_64-cpython-39/dgl/sampling copying dgl/sampling/randomwalks.py -> build/lib.linux-x86_64-cpython-39/dgl/sampling copying dgl/sampling/utils.py -> build/lib.linux-x86_64-cpython-39/dgl/sampling copying dgl/sampling/node2vec_randomwalk.py -> build/lib.linux-x86_64-cpython-39/dgl/sampling copying dgl/sampling/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/sampling creating build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/qm7b.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/graph_serialize.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/dgl_dataset.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/csv_dataset_base.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/knowledge_graph.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/qm9_edge.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/gnn_benchmark.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/qm9.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/icews18.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/gdelt.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/utils.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/sbm.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/csv_dataset.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/karate.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/citation_graph.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/rdf.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/bitcoinotc.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/ppi.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/tensor_serialize.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/flickr.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/wikics.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/heterograph_serialize.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/tu.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying 
dgl/data/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/gindt.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/tree.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/synthetic.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/adapter.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/fakenews.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/fraud.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/minigc.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/yelp.py -> build/lib.linux-x86_64-cpython-39/dgl/data copying dgl/data/reddit.py -> build/lib.linux-x86_64-cpython-39/dgl/data creating build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/dist_graph.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/graph_partition_book.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/partition.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/standalone_kvstore.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/graph_services.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/role.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/constants.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/rpc_server.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/kvstore.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/dist_context.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/rpc.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/rpc_client.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/id_map.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/server_state.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/shared_mem_utils.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/dist_tensor.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed copying dgl/distributed/dist_dataloader.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed creating build/lib.linux-x86_64-cpython-39/dgl/_deprecate copying dgl/_deprecate/nodeflow.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate copying dgl/_deprecate/udf.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate copying dgl/_deprecate/view.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate copying dgl/_deprecate/kernel.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate copying dgl/_deprecate/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate copying dgl/_deprecate/graph.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate copying dgl/_deprecate/frame.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate creating build/lib.linux-x86_64-cpython-39/dgl/transforms copying dgl/transforms/functional.py -> build/lib.linux-x86_64-cpython-39/dgl/transforms copying dgl/transforms/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/transforms copying dgl/transforms/module.py -> build/lib.linux-x86_64-cpython-39/dgl/transforms creating build/lib.linux-x86_64-cpython-39/dgl/distgnn copying dgl/distgnn/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distgnn creating 
build/lib.linux-x86_64-cpython-39/dgl/_ffi copying dgl/_ffi/runtime_ctypes.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi copying dgl/_ffi/function.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi copying dgl/_ffi/libinfo.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi copying dgl/_ffi/streams.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi copying dgl/_ffi/base.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi copying dgl/_ffi/ndarray.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi copying dgl/_ffi/object_generic.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi copying dgl/_ffi/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi copying dgl/_ffi/object.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi creating build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch copying dgl/nn/pytorch/glob.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch copying dgl/nn/pytorch/hetero.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch copying dgl/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch copying dgl/nn/pytorch/linear.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch copying dgl/nn/pytorch/utils.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch copying dgl/nn/pytorch/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch copying dgl/nn/pytorch/factory.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch copying dgl/nn/pytorch/softmax.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch creating build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet copying dgl/nn/mxnet/glob.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet copying dgl/nn/mxnet/hetero.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet copying dgl/nn/mxnet/utils.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet copying dgl/nn/mxnet/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet copying dgl/nn/mxnet/softmax.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet creating build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow copying dgl/nn/tensorflow/glob.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow copying dgl/nn/tensorflow/hetero.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow copying dgl/nn/tensorflow/utils.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow copying dgl/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow copying dgl/nn/tensorflow/softmax.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow creating build/lib.linux-x86_64-cpython-39/dgl/nn/functional copying dgl/nn/functional/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/functional creating build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/transe.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/edgepred.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/transr.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/link copying dgl/nn/pytorch/link/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/link creating build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/agnnconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/relgraphconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/appnpconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/dgnconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/grouprevres.py -> 
build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/twirlsconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/egatconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/graphconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densechebconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gcn2conv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/ginconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/dotgatconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/cfconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gmmconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/sageconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densesageconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gineconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/edgeconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/nnconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gatv2conv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/hgtconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/pnaconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gatedgraphconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/atomicconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/tagconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/densegraphconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/egnnconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/chebconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/sgconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv copying dgl/nn/pytorch/conv/gatconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv creating build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/explain copying dgl/nn/pytorch/explain/gnnexplainer.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/explain copying dgl/nn/pytorch/explain/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/explain creating build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/agnnconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/relgraphconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/appnpconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/graphconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densechebconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/ginconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gmmconv.py -> 
build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/sageconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densesageconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/edgeconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/nnconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gatedgraphconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/tagconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/densegraphconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/chebconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/sgconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv copying dgl/nn/mxnet/conv/gatconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv creating build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/relgraphconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/appnpconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/graphconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/densechebconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/ginconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/sageconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/edgeconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/chebconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/sgconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv copying dgl/nn/tensorflow/conv/gatconv.py -> build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv creating build/lib.linux-x86_64-cpython-39/dgl/contrib/sampling copying dgl/contrib/sampling/sampler.py -> build/lib.linux-x86_64-cpython-39/dgl/contrib/sampling copying dgl/contrib/sampling/dis_sampler.py -> build/lib.linux-x86_64-cpython-39/dgl/contrib/sampling copying dgl/contrib/sampling/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/contrib/sampling creating build/lib.linux-x86_64-cpython-39/dgl/contrib/data copying dgl/contrib/data/knowledge_graph.py -> build/lib.linux-x86_64-cpython-39/dgl/contrib/data copying dgl/contrib/data/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/contrib/data creating build/lib.linux-x86_64-cpython-39/dgl/optim/pytorch copying dgl/optim/pytorch/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/optim/pytorch copying dgl/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-cpython-39/dgl/optim/pytorch creating build/lib.linux-x86_64-cpython-39/dgl/optim/mxnet copying dgl/optim/mxnet/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/optim/mxnet creating build/lib.linux-x86_64-cpython-39/dgl/optim/tensorflow copying dgl/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/optim/tensorflow creating build/lib.linux-x86_64-cpython-39/dgl/_dataloading/pytorch copying 
dgl/_dataloading/pytorch/dataloader.py -> build/lib.linux-x86_64-cpython-39/dgl/_dataloading/pytorch copying dgl/_dataloading/pytorch/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/_dataloading/pytorch creating build/lib.linux-x86_64-cpython-39/dgl/backend/pytorch copying dgl/backend/pytorch/sparse.py -> build/lib.linux-x86_64-cpython-39/dgl/backend/pytorch copying dgl/backend/pytorch/tensor.py -> build/lib.linux-x86_64-cpython-39/dgl/backend/pytorch copying dgl/backend/pytorch/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/backend/pytorch creating build/lib.linux-x86_64-cpython-39/dgl/backend/mxnet copying dgl/backend/mxnet/sparse.py -> build/lib.linux-x86_64-cpython-39/dgl/backend/mxnet copying dgl/backend/mxnet/tensor.py -> build/lib.linux-x86_64-cpython-39/dgl/backend/mxnet copying dgl/backend/mxnet/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/backend/mxnet copying dgl/backend/mxnet/sparse_optim.py -> build/lib.linux-x86_64-cpython-39/dgl/backend/mxnet creating build/lib.linux-x86_64-cpython-39/dgl/backend/tensorflow copying dgl/backend/tensorflow/sparse.py -> build/lib.linux-x86_64-cpython-39/dgl/backend/tensorflow copying dgl/backend/tensorflow/tensor.py -> build/lib.linux-x86_64-cpython-39/dgl/backend/tensorflow copying dgl/backend/tensorflow/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/backend/tensorflow copying dgl/backend/tensorflow/sparse_optim.py -> build/lib.linux-x86_64-cpython-39/dgl/backend/tensorflow creating build/lib.linux-x86_64-cpython-39/dgl/distributed/nn copying dgl/distributed/nn/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed/nn creating build/lib.linux-x86_64-cpython-39/dgl/distributed/optim copying dgl/distributed/optim/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed/optim creating build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/pytorch copying dgl/distributed/nn/pytorch/sparse_emb.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/pytorch copying dgl/distributed/nn/pytorch/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/pytorch creating build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/mxnet copying dgl/distributed/nn/mxnet/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/mxnet creating build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/tensorflow copying dgl/distributed/nn/tensorflow/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/tensorflow creating build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/utils.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/pytorch copying dgl/distributed/optim/pytorch/sparse_optim.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/pytorch creating build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/mxnet copying dgl/distributed/optim/mxnet/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/mxnet creating build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/tensorflow copying dgl/distributed/optim/tensorflow/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/tensorflow creating build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/spmv.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/degree_bucketing.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime copying 
dgl/_deprecate/runtime/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/scheduler.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/adapter.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime copying dgl/_deprecate/runtime/runtime.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime creating build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/program.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/executor.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/var.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/registry.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/ir copying dgl/_deprecate/runtime/ir/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/ir creating build/lib.linux-x86_64-cpython-39/dgl/distgnn/partition copying dgl/distgnn/partition/libra_partition.py -> build/lib.linux-x86_64-cpython-39/dgl/distgnn/partition copying dgl/distgnn/partition/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distgnn/partition creating build/lib.linux-x86_64-cpython-39/dgl/distgnn/tools copying dgl/distgnn/tools/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/distgnn/tools copying dgl/distgnn/tools/tools.py -> build/lib.linux-x86_64-cpython-39/dgl/distgnn/tools creating build/lib.linux-x86_64-cpython-39/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/function.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/types.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/ndarray.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi/_ctypes copying dgl/_ffi/_ctypes/object.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi/_ctypes creating build/lib.linux-x86_64-cpython-39/dgl/_ffi/_cy3 copying dgl/_ffi/_cy3/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi/_cy3 creating build/lib.linux-x86_64-cpython-39/dgl/_ffi/_cy2 copying dgl/_ffi/_cy2/__init__.py -> build/lib.linux-x86_64-cpython-39/dgl/_ffi/_cy2 running build_ext creating build/bdist.linux-x86_64 creating build/bdist.linux-x86_64/egg creating build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-cpython-39/dgl/mock_sparse/sp_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-cpython-39/dgl/mock_sparse/diag_matrix.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-cpython-39/dgl/mock_sparse/reduction.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-cpython-39/dgl/mock_sparse/__init__.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse copying build/lib.linux-x86_64-cpython-39/dgl/mock_sparse/elementwise_op_sp.py -> build/bdist.linux-x86_64/egg/dgl/mock_sparse creating build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-cpython-39/dgl/geometry/edge_coarsening.py -> build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-cpython-39/dgl/geometry/fps.py -> build/bdist.linux-x86_64/egg/dgl/geometry copying build/lib.linux-x86_64-cpython-39/dgl/geometry/capi.py -> build/bdist.linux-x86_64/egg/dgl/geometry copying 
build/lib.linux-x86_64-cpython-39/dgl/geometry/__init__.py -> build/bdist.linux-x86_64/egg/dgl/geometry creating build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-cpython-39/dgl/function/base.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-cpython-39/dgl/function/message.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-cpython-39/dgl/function/reducer.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-cpython-39/dgl/function/__init__.py -> build/bdist.linux-x86_64/egg/dgl/function copying build/lib.linux-x86_64-cpython-39/dgl/partition.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/nn creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/link/transe.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/link/edgepred.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/link/transr.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/link/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/sparse_emb.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/linear.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/agnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/dgnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/grouprevres.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/twirlsconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/egatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/gcn2conv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/dotgatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying 
build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/cfconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/gmmconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/densesageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/gineconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/nnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/gatv2conv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/hgtconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/pnaconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/gatedgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/atomicconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/tagconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/densegraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/egnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/explain/gnnexplainer.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/explain/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/factory.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/nn/pytorch/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet creating build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying 
build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/agnnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/gmmconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/densesageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/nnconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/gatedgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/tagconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/densegraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/nn/mxnet/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/glob.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/hetero.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/utils.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv/relgraphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv/appnpconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv/graphconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv/densechebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying 
build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv/ginconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv/sageconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv/edgeconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv/chebconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv/sgconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/conv/gatconv.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/nn/tensorflow/softmax.py -> build/bdist.linux-x86_64/egg/dgl/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/nn/functional copying build/lib.linux-x86_64-cpython-39/dgl/nn/functional/__init__.py -> build/bdist.linux-x86_64/egg/dgl/nn/functional copying build/lib.linux-x86_64-cpython-39/dgl/core.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/subgraph.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-cpython-39/dgl/contrib/dis_kvstore.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-cpython-39/dgl/contrib/unified_tensor.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-cpython-39/dgl/contrib/graph_store.py -> build/bdist.linux-x86_64/egg/dgl/contrib creating build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-cpython-39/dgl/contrib/sampling/sampler.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-cpython-39/dgl/contrib/sampling/dis_sampler.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling copying build/lib.linux-x86_64-cpython-39/dgl/contrib/sampling/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib/sampling creating build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-cpython-39/dgl/contrib/data/knowledge_graph.py -> build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-cpython-39/dgl/contrib/data/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib/data copying build/lib.linux-x86_64-cpython-39/dgl/contrib/__init__.py -> build/bdist.linux-x86_64/egg/dgl/contrib copying build/lib.linux-x86_64-cpython-39/dgl/traversal.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-cpython-39/dgl/utils/checks.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-cpython-39/dgl/utils/exception.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-cpython-39/dgl/utils/pin_memory.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-cpython-39/dgl/utils/internal.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-cpython-39/dgl/utils/filter.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-cpython-39/dgl/utils/data.py -> build/bdist.linux-x86_64/egg/dgl/utils copying 
build/lib.linux-x86_64-cpython-39/dgl/utils/__init__.py -> build/bdist.linux-x86_64/egg/dgl/utils copying build/lib.linux-x86_64-cpython-39/dgl/utils/shared_mem.py -> build/bdist.linux-x86_64/egg/dgl/utils creating build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-cpython-39/dgl/multiprocessing/pytorch.py -> build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-cpython-39/dgl/multiprocessing/__init__.py -> build/bdist.linux-x86_64/egg/dgl/multiprocessing copying build/lib.linux-x86_64-cpython-39/dgl/base.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/optim creating build/bdist.linux-x86_64/egg/dgl/optim/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/optim/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/optim/pytorch/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/optim/pytorch creating build/bdist.linux-x86_64/egg/dgl/optim/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/optim/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim creating build/bdist.linux-x86_64/egg/dgl/optim/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/optim/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/heterograph_index.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/sparse.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/global_config.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-cpython-39/dgl/dataloading/base.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-cpython-39/dgl/dataloading/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-cpython-39/dgl/dataloading/graphsaint.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-cpython-39/dgl/dataloading/shadow.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-cpython-39/dgl/dataloading/negative_sampler.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-cpython-39/dgl/dataloading/__init__.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-cpython-39/dgl/dataloading/cluster_gcn.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-cpython-39/dgl/dataloading/neighbor_sampler.py -> build/bdist.linux-x86_64/egg/dgl/dataloading copying build/lib.linux-x86_64-cpython-39/dgl/dataloading/dist_dataloader.py -> build/bdist.linux-x86_64/egg/dgl/dataloading creating build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-cpython-39/dgl/_dataloading/neighbor.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-cpython-39/dgl/_dataloading/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading creating build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/_dataloading/pytorch/dataloader.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/_dataloading/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch copying 
build/lib.linux-x86_64-cpython-39/dgl/_dataloading/shadow.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-cpython-39/dgl/_dataloading/negative_sampler.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-cpython-39/dgl/_dataloading/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-cpython-39/dgl/_dataloading/cluster_gcn.py -> build/bdist.linux-x86_64/egg/dgl/_dataloading copying build/lib.linux-x86_64-cpython-39/dgl/_api_internal.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/heterograph.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/backend copying build/lib.linux-x86_64-cpython-39/dgl/backend/backend.py -> build/bdist.linux-x86_64/egg/dgl/backend creating build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/backend/pytorch/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/backend/pytorch/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/backend/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/pytorch creating build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/backend/mxnet/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/backend/mxnet/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/backend/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/backend/mxnet/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/backend/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/backend/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend creating build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/backend/tensorflow/sparse.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/backend/tensorflow/tensor.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/backend/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/backend/tensorflow/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/backend/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/backend/set_default_backend.py -> build/bdist.linux-x86_64/egg/dgl/backend copying build/lib.linux-x86_64-cpython-39/dgl/network.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-cpython-39/dgl/ops/edge_softmax.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-cpython-39/dgl/ops/sddmm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-cpython-39/dgl/ops/gather_mm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-cpython-39/dgl/ops/segment.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-cpython-39/dgl/ops/__init__.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-cpython-39/dgl/ops/spmm.py -> build/bdist.linux-x86_64/egg/dgl/ops copying build/lib.linux-x86_64-cpython-39/dgl/logging.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/graph_index.py -> 
build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-cpython-39/dgl/cuda/nccl.py -> build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-cpython-39/dgl/cuda/__init__.py -> build/bdist.linux-x86_64/egg/dgl/cuda copying build/lib.linux-x86_64-cpython-39/dgl/udf.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-cpython-39/dgl/storages/pytorch_tensor.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-cpython-39/dgl/storages/base.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-cpython-39/dgl/storages/tensor.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-cpython-39/dgl/storages/__init__.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-cpython-39/dgl/storages/numpy.py -> build/bdist.linux-x86_64/egg/dgl/storages copying build/lib.linux-x86_64-cpython-39/dgl/ndarray.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/init.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/view.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-cpython-39/dgl/sampling/neighbor.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-cpython-39/dgl/sampling/pinsage.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-cpython-39/dgl/sampling/negative.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-cpython-39/dgl/sampling/randomwalks.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-cpython-39/dgl/sampling/utils.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-cpython-39/dgl/sampling/node2vec_randomwalk.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-cpython-39/dgl/sampling/__init__.py -> build/bdist.linux-x86_64/egg/dgl/sampling copying build/lib.linux-x86_64-cpython-39/dgl/generators.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/qm7b.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/graph_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/dgl_dataset.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/csv_dataset_base.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/knowledge_graph.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/qm9_edge.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/gnn_benchmark.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/qm9.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/icews18.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/gdelt.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/utils.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/sbm.py -> build/bdist.linux-x86_64/egg/dgl/data copying 
build/lib.linux-x86_64-cpython-39/dgl/data/csv_dataset.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/karate.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/citation_graph.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/rdf.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/bitcoinotc.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/ppi.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/tensor_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/flickr.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/wikics.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/heterograph_serialize.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/tu.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/__init__.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/gindt.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/tree.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/synthetic.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/adapter.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/fakenews.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/fraud.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/minigc.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/yelp.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/data/reddit.py -> build/bdist.linux-x86_64/egg/dgl/data copying build/lib.linux-x86_64-cpython-39/dgl/convert.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/container.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/merge.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/readout.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/__init__.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/dist_graph.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/graph_partition_book.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/partition.py -> build/bdist.linux-x86_64/egg/dgl/distributed creating build/bdist.linux-x86_64/egg/dgl/distributed/nn creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/pytorch/sparse_emb.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet copying 
build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn creating build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/distributed/nn/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow creating build/bdist.linux-x86_64/egg/dgl/distributed/optim creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/pytorch/utils.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/pytorch/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch copying build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/pytorch/sparse_optim.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/mxnet/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet copying build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim creating build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/distributed/optim/tensorflow/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow copying build/lib.linux-x86_64-cpython-39/dgl/distributed/standalone_kvstore.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/graph_services.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/role.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/constants.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/rpc_server.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/kvstore.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/dist_context.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/rpc.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/rpc_client.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/id_map.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/server_state.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/shared_mem_utils.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/dist_tensor.py -> build/bdist.linux-x86_64/egg/dgl/distributed copying build/lib.linux-x86_64-cpython-39/dgl/distributed/dist_dataloader.py -> build/bdist.linux-x86_64/egg/dgl/distributed creating build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/nodeflow.py -> 
build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/udf.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/view.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/kernel.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/spmv.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/degree_bucketing.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/scheduler.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/adapter.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/runtime.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime creating build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/ir/program.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/ir/executor.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/ir/var.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/ir/registry.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/runtime/ir/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/graph.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-cpython-39/dgl/_deprecate/frame.py -> build/bdist.linux-x86_64/egg/dgl/_deprecate copying build/lib.linux-x86_64-cpython-39/dgl/propagate.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/batch.py -> build/bdist.linux-x86_64/egg/dgl creating build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-cpython-39/dgl/transforms/functional.py -> build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-cpython-39/dgl/transforms/__init__.py -> build/bdist.linux-x86_64/egg/dgl/transforms copying build/lib.linux-x86_64-cpython-39/dgl/transforms/module.py -> build/bdist.linux-x86_64/egg/dgl/transforms creating build/bdist.linux-x86_64/egg/dgl/distgnn creating build/bdist.linux-x86_64/egg/dgl/distgnn/partition copying build/lib.linux-x86_64-cpython-39/dgl/distgnn/partition/libra_partition.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/partition copying build/lib.linux-x86_64-cpython-39/dgl/distgnn/partition/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/partition creating build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-cpython-39/dgl/distgnn/tools/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-cpython-39/dgl/distgnn/tools/tools.py -> 
build/bdist.linux-x86_64/egg/dgl/distgnn/tools copying build/lib.linux-x86_64-cpython-39/dgl/distgnn/__init__.py -> build/bdist.linux-x86_64/egg/dgl/distgnn creating build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/runtime_ctypes.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/function.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/libinfo.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/streams.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/base.py -> build/bdist.linux-x86_64/egg/dgl/_ffi creating build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/_ctypes/function.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/_ctypes/types.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/_ctypes/ndarray.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/_ctypes/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/_ctypes/object.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/ndarray.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/object_generic.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/object.py -> build/bdist.linux-x86_64/egg/dgl/_ffi creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3 copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/_cy3/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3 creating build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2 copying build/lib.linux-x86_64-cpython-39/dgl/_ffi/_cy2/__init__.py -> build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2 copying build/lib.linux-x86_64-cpython-39/dgl/frame.py -> build/bdist.linux-x86_64/egg/dgl copying build/lib.linux-x86_64-cpython-39/dgl/random.py -> build/bdist.linux-x86_64/egg/dgl byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/sp_matrix.py to sp_matrix.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/diag_matrix.py to diag_matrix.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/reduction.py to reduction.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/mock_sparse/elementwise_op_sp.py to elementwise_op_sp.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/edge_coarsening.py to edge_coarsening.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/fps.py to fps.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/capi.py to capi.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/geometry/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/base.py to base.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/message.py to message.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/reducer.py to reducer.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/function/__init__.py 
to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/partition.py to partition.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/glob.py to glob.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transe.py to transe.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/edgepred.py to edgepred.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/transr.py to transr.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/link/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/hetero.py to hetero.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/sparse_emb.py to sparse_emb.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/linear.py to linear.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/utils.py to utils.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/agnnconv.py to agnnconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/relgraphconv.py to relgraphconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/appnpconv.py to appnpconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dgnconv.py to dgnconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/grouprevres.py to grouprevres.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/twirlsconv.py to twirlsconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egatconv.py to egatconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/graphconv.py to graphconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densechebconv.py to densechebconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gcn2conv.py to gcn2conv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/ginconv.py to ginconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/dotgatconv.py to dotgatconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/cfconv.py to cfconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gmmconv.py to gmmconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sageconv.py to sageconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densesageconv.py to densesageconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gineconv.py to gineconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/edgeconv.py to edgeconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/nnconv.py to nnconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatv2conv.py to gatv2conv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/hgtconv.py to hgtconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/pnaconv.py to pnaconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatedgraphconv.py to gatedgraphconv.cpython-39.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/atomicconv.py to atomicconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/tagconv.py to tagconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/densegraphconv.py to densegraphconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/egnnconv.py to egnnconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/chebconv.py to chebconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/sgconv.py to sgconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/conv/gatconv.py to gatconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/gnnexplainer.py to gnnexplainer.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/explain/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/factory.py to factory.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/pytorch/softmax.py to softmax.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/glob.py to glob.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/hetero.py to hetero.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/utils.py to utils.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/agnnconv.py to agnnconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/relgraphconv.py to relgraphconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/appnpconv.py to appnpconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/graphconv.py to graphconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densechebconv.py to densechebconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/ginconv.py to ginconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gmmconv.py to gmmconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sageconv.py to sageconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densesageconv.py to densesageconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/edgeconv.py to edgeconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/nnconv.py to nnconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatedgraphconv.py to gatedgraphconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/tagconv.py to tagconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/densegraphconv.py to densegraphconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/chebconv.py to chebconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/sgconv.py to sgconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/conv/gatconv.py to gatconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/mxnet/softmax.py to softmax.cpython-39.pyc 
byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/glob.py to glob.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/hetero.py to hetero.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/utils.py to utils.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/relgraphconv.py to relgraphconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/appnpconv.py to appnpconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/graphconv.py to graphconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/densechebconv.py to densechebconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/ginconv.py to ginconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sageconv.py to sageconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/edgeconv.py to edgeconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/chebconv.py to chebconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/sgconv.py to sgconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/conv/gatconv.py to gatconv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/tensorflow/softmax.py to softmax.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/nn/functional/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/core.py to core.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/subgraph.py to subgraph.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/dis_kvstore.py to dis_kvstore.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/unified_tensor.py to unified_tensor.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/graph_store.py to graph_store.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/sampler.py to sampler.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/dis_sampler.py to dis_sampler.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/sampling/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/knowledge_graph.py to knowledge_graph.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/data/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/contrib/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/traversal.py to traversal.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/checks.py to checks.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/exception.py to exception.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/pin_memory.py to pin_memory.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/internal.py to internal.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/filter.py to filter.cpython-39.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/utils/data.py to data.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/utils/shared_mem.py to shared_mem.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/pytorch.py to pytorch.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/multiprocessing/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/base.py to base.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/pytorch/sparse_optim.py to sparse_optim.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/mxnet/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/optim/tensorflow/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph_index.py to heterograph_index.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sparse.py to sparse.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/global_config.py to global_config.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/base.py to base.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dataloader.py to dataloader.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/graphsaint.py to graphsaint.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/shadow.py to shadow.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/negative_sampler.py to negative_sampler.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/cluster_gcn.py to cluster_gcn.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/neighbor_sampler.py to neighbor_sampler.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/dataloading/dist_dataloader.py to dist_dataloader.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/neighbor.py to neighbor.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/dataloader.py to dataloader.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/dataloader.py to dataloader.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/pytorch/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/shadow.py to shadow.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/negative_sampler.py to negative_sampler.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_dataloading/cluster_gcn.py to cluster_gcn.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_api_internal.py to _api_internal.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/heterograph.py to heterograph.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/backend.py to backend.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/sparse.py to sparse.cpython-39.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/backend/pytorch/tensor.py to tensor.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/pytorch/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse.py to sparse.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/tensor.py to tensor.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/mxnet/sparse_optim.py to sparse_optim.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse.py to sparse.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/tensor.py to tensor.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/tensorflow/sparse_optim.py to sparse_optim.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/backend/set_default_backend.py to set_default_backend.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/network.py to network.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/edge_softmax.py to edge_softmax.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/sddmm.py to sddmm.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/gather_mm.py to gather_mm.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/segment.py to segment.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ops/spmm.py to spmm.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/logging.py to logging.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/graph_index.py to graph_index.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/nccl.py to nccl.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/cuda/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/udf.py to udf.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/pytorch_tensor.py to pytorch_tensor.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/base.py to base.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/tensor.py to tensor.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/storages/numpy.py to numpy.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/ndarray.py to ndarray.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/init.py to init.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/view.py to view.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/neighbor.py to neighbor.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/pinsage.py to pinsage.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/negative.py to negative.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/randomwalks.py to randomwalks.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/utils.py to utils.cpython-39.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/sampling/node2vec_randomwalk.py to node2vec_randomwalk.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/sampling/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/generators.py to generators.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm7b.py to qm7b.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/graph_serialize.py to graph_serialize.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/dgl_dataset.py to dgl_dataset.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset_base.py to csv_dataset_base.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/knowledge_graph.py to knowledge_graph.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9_edge.py to qm9_edge.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gnn_benchmark.py to gnn_benchmark.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/qm9.py to qm9.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/icews18.py to icews18.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gdelt.py to gdelt.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/utils.py to utils.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/sbm.py to sbm.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/csv_dataset.py to csv_dataset.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/karate.py to karate.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/citation_graph.py to citation_graph.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/rdf.py to rdf.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/bitcoinotc.py to bitcoinotc.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/ppi.py to ppi.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tensor_serialize.py to tensor_serialize.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/flickr.py to flickr.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/wikics.py to wikics.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/heterograph_serialize.py to heterograph_serialize.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tu.py to tu.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/gindt.py to gindt.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/tree.py to tree.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/synthetic.py to synthetic.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/adapter.py to adapter.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fakenews.py to fakenews.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/fraud.py to fraud.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/minigc.py to minigc.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/yelp.py to yelp.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/data/reddit.py to reddit.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/convert.py to convert.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/container.py to container.cpython-39.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/merge.py to merge.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/readout.py to readout.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_graph.py to dist_graph.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_partition_book.py to graph_partition_book.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/partition.py to partition.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/sparse_emb.py to sparse_emb.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/pytorch/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/mxnet/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/nn/tensorflow/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/utils.py to utils.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/pytorch/sparse_optim.py to sparse_optim.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/mxnet/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/optim/tensorflow/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/standalone_kvstore.py to standalone_kvstore.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/graph_services.py to graph_services.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/role.py to role.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/constants.py to constants.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_server.py to rpc_server.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/kvstore.py to kvstore.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_context.py to dist_context.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc.py to rpc.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/rpc_client.py to rpc_client.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/id_map.py to id_map.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/server_state.py to server_state.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/shared_mem_utils.py to shared_mem_utils.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_tensor.py to dist_tensor.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distributed/dist_dataloader.py to dist_dataloader.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/nodeflow.py to nodeflow.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/udf.py to udf.cpython-39.pyc byte-compiling 
build/bdist.linux-x86_64/egg/dgl/_deprecate/view.py to view.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/kernel.py to kernel.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/spmv.py to spmv.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/degree_bucketing.py to degree_bucketing.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/scheduler.py to scheduler.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/adapter.py to adapter.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/runtime.py to runtime.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/program.py to program.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/executor.py to executor.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/var.py to var.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/registry.py to registry.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/runtime/ir/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/graph.py to graph.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_deprecate/frame.py to frame.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/propagate.py to propagate.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/batch.py to batch.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/functional.py to functional.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/transforms/module.py to module.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/libra_partition.py to libra_partition.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/partition/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/tools/tools.py to tools.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/distgnn/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/runtime_ctypes.py to runtime_ctypes.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/function.py to function.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/libinfo.py to libinfo.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/streams.py to streams.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/base.py to base.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/function.py to function.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/types.py to types.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/ndarray.py to ndarray.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/__init__.py to __init__.cpython-39.pyc byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_ctypes/object.py to object.cpython-39.pyc 
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/ndarray.py to ndarray.cpython-39.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object_generic.py to object_generic.cpython-39.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/__init__.py to __init__.cpython-39.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/object.py to object.cpython-39.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy3/__init__.py to __init__.cpython-39.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/_ffi/_cy2/__init__.py to __init__.cpython-39.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/frame.py to frame.cpython-39.pyc
byte-compiling build/bdist.linux-x86_64/egg/dgl/random.py to random.cpython-39.pyc
installing package data to build/bdist.linux-x86_64/egg
running install_data
copying ../build/libdgl.so -> build/bdist.linux-x86_64/egg/dgl
creating build/bdist.linux-x86_64/egg/dgl/tensoradapter
creating build/bdist.linux-x86_64/egg/dgl/tensoradapter/pytorch
creating build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/not-zip-safe -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
copying dgl.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO
creating dist
creating 'dist/dgl-0.9-py3.9-linux-x86_64.egg' and adding 'build/bdist.linux-x86_64/egg' to it
removing 'build/bdist.linux-x86_64/egg' (and everything under it)
Processing dgl-0.9-py3.9-linux-x86_64.egg
creating /opt/conda/lib/python3.9/site-packages/dgl-0.9-py3.9-linux-x86_64.egg
Extracting dgl-0.9-py3.9-linux-x86_64.egg to /opt/conda/lib/python3.9/site-packages
Adding dgl 0.9 to easy-install.pth file
Installed /opt/conda/lib/python3.9/site-packages/dgl-0.9-py3.9-linux-x86_64.egg
Processing dependencies for dgl==0.9
Searching for certifi>=2017.4.17
Reading https://pypi.org/simple/certifi/
/opt/conda/lib/python3.9/site-packages/pkg_resources/__init__.py:123: PkgResourcesDeprecationWarning: is an invalid version and will not be supported in a future release
warnings.warn(
Downloading https://files.pythonhosted.org/packages/1d/38/fa96a426e0c0e68aabc68e896584b83ad1eec779265a028e156ce509630e/certifi-2022.9.24-py3-none-any.whl#sha256=90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382
Best match: certifi 2022.9.24
Processing certifi-2022.9.24-py3-none-any.whl
Installing certifi-2022.9.24-py3-none-any.whl to /opt/conda/lib/python3.9/site-packages
Adding certifi 2022.9.24 to easy-install.pth file
Installed /opt/conda/lib/python3.9/site-packages/certifi-2022.9.24-py3.9.egg
Searching for psutil==5.9.2
Best match: psutil 5.9.2
Adding psutil 5.9.2 to easy-install.pth file
Using /opt/conda/lib/python3.9/site-packages
Searching for tqdm==4.64.1
Best match: tqdm 4.64.1
Adding tqdm 4.64.1 to easy-install.pth file
Installing tqdm script to /opt/conda/bin
Using /opt/conda/lib/python3.9/site-packages
Searching for requests==2.28.1
Best match: requests 2.28.1
Adding requests 2.28.1 to easy-install.pth file
Using /opt/conda/lib/python3.9/site-packages
Searching for networkx==2.8.6
Best match: networkx 2.8.6
Adding networkx 2.8.6 to easy-install.pth file
Using /opt/conda/lib/python3.9/site-packages
Searching for scipy==1.9.1
Best match: scipy 1.9.1
Adding scipy 1.9.1 to easy-install.pth file
Using /opt/conda/lib/python3.9/site-packages
Searching for numpy==1.22.4
Best match: numpy 1.22.4
Adding numpy 1.22.4 to easy-install.pth file
Installing f2py script to /opt/conda/bin
Installing f2py3 script to /opt/conda/bin
Installing f2py3.9 script to /opt/conda/bin
Using /opt/conda/lib/python3.9/site-packages
Searching for urllib3==1.26.11
Best match: urllib3 1.26.11
Adding urllib3 1.26.11 to easy-install.pth file
Using /opt/conda/lib/python3.9/site-packages
Searching for idna==3.3
Best match: idna 3.3
Adding idna 3.3 to easy-install.pth file
Using /opt/conda/lib/python3.9/site-packages
Searching for charset-normalizer==2.1.1
Best match: charset-normalizer 2.1.1
Adding charset-normalizer 2.1.1 to easy-install.pth file
Installing normalizer script to /opt/conda/bin
Using /opt/conda/lib/python3.9/site-packages
Finished processing dependencies for dgl==0.9
WARNING: Cython is not installed, will compile without cython module
running build_ext
~/jenkins/workspace/dgl_PR-4648@3
[Pipeline] sh
+ ls -lh /usr/lib/x86_64-linux-gnu/
total 507M
-rw-r--r-- 1 root root 496 May 3 10:19 Mcrt1.o
-rw-r--r-- 1 root root 1.8K May 3 10:19 Scrt1.o
drwxr-xr-x 1 root root 4.0K Sep 19 18:34 audit
drwxr-xr-x 2 root root 4.0K Apr 28 00:03 coreutils
-rw-r--r-- 1 root root 1.9K May 3 10:19 crt1.o
-rw-r--r-- 1 root root 1.2K May 3 10:19 crti.o
-rw-r--r-- 1 root root 648 May 3 10:19 crtn.o
drwxr-xr-x 1 root root 4.0K Sep 19 18:34 engines-1.1
drwxr-xr-x 1 root root 12K Sep 19 18:34 gconv
-rw-r--r-- 1 root root 2.5K May 3 10:19 gcrt1.o
-rw-r--r-- 1 root root 2.3K May 3 10:19 grcrt1.o
drwxr-xr-x 3 root root 4.0K Sep 19 18:34 krb5
drwxr-xr-x 2 root root 4.0K May 20 08:07 ldscripts
-rw-r--r-- 1 root root 2.0K May 3 10:19 libBrokenLocale.a
lrwxrwxrwx 1 root root 42 May 3 10:19 libBrokenLocale.so -> /lib/x86_64-linux-gnu/libBrokenLocale.so.1
-rw-r--r-- 1 root root 23K May 3 10:19 libanl.a
lrwxrwxrwx 1 root root 33 May 3 10:19 libanl.so -> /lib/x86_64-linux-gnu/libanl.so.1
lrwxrwxrwx 1 root root 19 Jun 15 2021 libapt-pkg.so.5.0 -> libapt-pkg.so.5.0.2
-rw-r--r-- 1 root root 1.8M Jun 15 2021 libapt-pkg.so.5.0.2
lrwxrwxrwx 1 root root 23 Jun 15 2021 libapt-private.so.0.0 -> libapt-private.so.0.0.0
-rw-r--r-- 1 root root 415K Jun 15 2021 libapt-private.so.0.0.0
lrwxrwxrwx 1 root root 16 Dec 4 2019 libasan.so.4 -> libasan.so.4.0.0
-rw-r--r-- 1 root root 1.4M Dec 4 2019 libasan.so.4.0.0
lrwxrwxrwx 1 root root 16 Dec 15 2017 libasn1.so.8 -> libasn1.so.8.0.0
-rw-r--r-- 1 root root 647K Dec 15 2017 libasn1.so.8.0.0
lrwxrwxrwx 1 root root 18 Feb 6 2018 libassuan.so.0 -> libassuan.so.0.8.1
-rw-r--r-- 1 root root 75K Feb 6 2018 libassuan.so.0.8.1
lrwxrwxrwx 1 root root 18 Mar 10 2020 libatomic.so.1 -> libatomic.so.1.2.0
-rw-r--r-- 1 root root 27K Mar 10 2020 libatomic.so.1.2.0
-rw-r--r-- 1 root root 1.3M Oct 20 2021 libbfd-2.30-system.so
-rw-r--r-- 1 root root 5.3M May 3 10:19 libc.a
-rw-r--r-- 1 root root 298 May 3 10:19 libc.so
-rw-r--r-- 1 root root 20K May 3 10:19 libc_nonshared.a
lrwxrwxrwx 1 root root 15 Mar 10 2020 libcc1.so.0 -> libcc1.so.0.0.0
-rw-r--r-- 1 root root 116K Mar 10 2020 libcc1.so.0.0.0
lrwxrwxrwx 1 root root 34 May 3 10:19 libcidn.so -> /lib/x86_64-linux-gnu/libcidn.so.1
lrwxrwxrwx 1 root root 19 Dec 4 2019 libcilkrts.so.5 -> libcilkrts.so.5.0.0
-rw-r--r-- 1 root root 117K Dec 4 2019 libcilkrts.so.5.0.0
-rw-r--r-- 1 root root 61K May 3 10:19 libcrypt.a
lrwxrwxrwx 1 root root 35 May 3 10:19 libcrypt.so -> /lib/x86_64-linux-gnu/libcrypt.so.1
-rw-r--r-- 1 root root 2.8M Jul 4 11:25 libcrypto.so.1.1
lrwxrwxrwx 1 root root 19 Aug 31 19:18 libcurl-gnutls.so.3 -> libcurl-gnutls.so.4
lrwxrwxrwx 1 root root 23 Aug 31 19:18 libcurl-gnutls.so.4 -> libcurl-gnutls.so.4.5.0
-rw-r--r-- 1 root root 515K Aug 31 19:18 libcurl-gnutls.so.4.5.0
lrwxrwxrwx 1 root root 16 Aug 31 19:18 libcurl.so.4 -> libcurl.so.4.5.0
-rw-r--r-- 1 root root 519K Aug 31 19:18 libcurl.so.4.5.0
-rw-r--r-- 1 root root 1.7M Jun 3 2019 libdb-5.3.so
lrwxrwxrwx 1 root root 25 Jun 8 2016 libdebconfclient.so.0 -> libdebconfclient.so.0.0.0
-rw-r--r-- 1 root root 11K Jun 8 2016 libdebconfclient.so.0.0.0
-rw-r--r-- 1 root root 12K May 3 10:19 libdl.a
lrwxrwxrwx 1 root root 32 May 3 10:19 libdl.so -> /lib/x86_64-linux-gnu/libdl.so.2
lrwxrwxrwx 1 root root 18 Mar 8 2022 libexpatw.so.1 -> libexpatw.so.1.6.7
-rw-r--r-- 1 root root 203K Mar 8 2022 libexpatw.so.1.6.7
lrwxrwxrwx 1 root root 15 Jan 7 2018 libffi.so.6 -> libffi.so.6.0.4
-rw-r--r-- 1 root root 31K Jan 7 2018 libffi.so.6.0.4
lrwxrwxrwx 1 root root 14 May 23 2018 libform.so.5 -> libform.so.5.9
-rw-r--r-- 1 root root 60K May 23 2018 libform.so.5.9
lrwxrwxrwx 1 root root 15 May 23 2018 libformw.so.5 -> libformw.so.5.9
-rw-r--r-- 1 root root 68K May 23 2018 libformw.so.5.9
-rw-r--r-- 1 root root 1.2K May 3 10:19 libg.a
lrwxrwxrwx 1 root root 16 Mar 14 2018 libgdbm.so.5 -> libgdbm.so.5.0.0
-rw-r--r-- 1 root root 51K Mar 14 2018 libgdbm.so.5.0.0
lrwxrwxrwx 1 root root 23 Mar 14 2018 libgdbm_compat.so.4 -> libgdbm_compat.so.4.0.0
-rw-r--r-- 1 root root 14K Mar 14 2018 libgdbm_compat.so.4.0.0
lrwxrwxrwx 1 root root 16 Jan 24 2018 libgmp.so.10 -> libgmp.so.10.3.2
-rw-r--r-- 1 root root 515K Jan 24 2018 libgmp.so.10.3.2
lrwxrwxrwx 1 root root 21 Aug 2 12:58 libgnutls.so.30 -> libgnutls.so.30.14.10
-rw-r--r-- 1 root root 1.4M Aug 2 12:58 libgnutls.so.30.14.10
lrwxrwxrwx 1 root root 16 Mar 10 2020 libgomp.so.1 -> libgomp.so.1.0.0
-rw-r--r-- 1 root root 188K Mar 10 2020 libgomp.so.1.0.0
lrwxrwxrwx 1 root root 18 Dec 15 2017 libgssapi.so.3 -> libgssapi.so.3.0.0
-rw-r--r-- 1 root root 260K Dec 15 2017 libgssapi.so.3.0.0
lrwxrwxrwx 1 root root 21 Nov 11 2020 libgssapi_krb5.so.2 -> libgssapi_krb5.so.2.2
-rw-r--r-- 1 root root 299K Nov 11 2020 libgssapi_krb5.so.2.2
lrwxrwxrwx 1 root root 19 Dec 15 2017 libhcrypto.so.4 -> libhcrypto.so.4.1.0
-rw-r--r-- 1 root root 213K Dec 15 2017 libhcrypto.so.4.1.0
lrwxrwxrwx 1 root root 20 Dec 15 2017 libheimbase.so.1 -> libheimbase.so.1.0.0
-rw-r--r-- 1 root root 59K Dec 15 2017 libheimbase.so.1.0.0
lrwxrwxrwx 1 root root 20 Dec 15 2017 libheimntlm.so.0 -> libheimntlm.so.0.1.0
-rw-r--r-- 1 root root 35K Dec 15 2017 libheimntlm.so.0.1.0
lrwxrwxrwx 1 root root 17 Jun 14 2021 libhogweed.so.4 -> libhogweed.so.4.5
-rw-r--r-- 1 root root 215K Jun 14 2021 libhogweed.so.4.5
lrwxrwxrwx 1 root root 17 Dec 15 2017 libhx509.so.5 -> libhx509.so.5.0.0
-rw-r--r-- 1 root root 294K Dec 15 2017 libhx509.so.5.0.0
lrwxrwxrwx 1 root root 16 Oct 24 2019 libidn2.so.0 -> libidn2.so.0.3.3
-rw-r--r-- 1 root root 114K Oct 24 2019 libidn2.so.0.3.3
lrwxrwxrwx 1 root root 16 Mar 9 2018 libisl.so.19 -> libisl.so.19.0.0
-rw-r--r-- 1 root root 1.6M Mar 9 2018 libisl.so.19.0.0
lrwxrwxrwx 1 root root 15 Mar 10 2020 libitm.so.1 -> libitm.so.1.0.0
-rw-r--r-- 1 root root 111K Mar 10 2020 libitm.so.1.0.0
lrwxrwxrwx 1 root root 18 Nov 11 2020 libk5crypto.so.3 -> libk5crypto.so.3.1
-rw-r--r-- 1 root root 195K Nov 11 2020 libk5crypto.so.3.1
lrwxrwxrwx 1 root root 17 Dec 15 2017 libkrb5.so.26 -> libkrb5.so.26.0.0
-rw-r--r-- 1 root root 561K Dec 15 2017 libkrb5.so.26.0.0
lrwxrwxrwx 1 root root 14 Nov 11 2020 libkrb5.so.3 -> libkrb5.so.3.3
-rw-r--r-- 1 root root 857K Nov 11 2020 libkrb5.so.3.3
lrwxrwxrwx 1 root root 21 Nov 11 2020 libkrb5support.so.0 -> libkrb5support.so.0.1
-rw-r--r-- 1 root root 43K Nov 11 2020 libkrb5support.so.0.1
lrwxrwxrwx 1 root root 17 Sep 3 2016 libksba.so.8 -> libksba.so.8.11.6
-rw-r--r-- 1 root root 223K Sep 3 2016 libksba.so.8.11.6
lrwxrwxrwx 1 root root 21 May 12 13:52 liblber-2.4.so.2 -> liblber-2.4.so.2.10.8
-rw-r--r-- 1 root root 55K May 12 13:52 liblber-2.4.so.2.10.8
lrwxrwxrwx 1 root root 18 May 12 13:52 libldap-2.4.so.2 -> libldap_r-2.4.so.2
lrwxrwxrwx 1 root root 23 May 12 13:52 libldap_r-2.4.so.2 -> libldap_r-2.4.so.2.10.8
-rw-r--r-- 1 root root 320K May 12 13:52 libldap_r-2.4.so.2.10.8
lrwxrwxrwx 1 root root 16 Mar 10 2020 liblsan.so.0 -> liblsan.so.0.0.0
-rw-r--r-- 1 root root 338K Mar 10 2020 liblsan.so.0.0.0
lrwxrwxrwx 1 root root 15 May 20 2021 liblz4.so.1 -> liblz4.so.1.7.1
-rw-r--r-- 1 root root 111K May 20 2021 liblz4.so.1.7.1
-rw-r--r-- 1 root root 3.5M May 3 10:19 libm-2.27.a
-rw-r--r-- 1 root root 132 May 3 10:19 libm.a
-rw-r--r-- 1 root root 186 May 3 10:19 libm.so
-rw-r--r-- 1 root root 1.5K May 3 10:19 libmcheck.a
lrwxrwxrwx 1 root root 14 May 23 2018 libmenu.so.5 -> libmenu.so.5.9
-rw-r--r-- 1 root root 35K May 23 2018 libmenu.so.5.9
lrwxrwxrwx 1 root root 15 May 23 2018 libmenuw.so.5 -> libmenuw.so.5.9
-rw-r--r-- 1 root root 35K May 23 2018 libmenuw.so.5.9
lrwxrwxrwx 1 root root 15 Jan 23 2018 libmpc.so.3 -> libmpc.so.3.1.0
-rw-r--r-- 1 root root 96K Jan 23 2018 libmpc.so.3.1.0
lrwxrwxrwx 1 root root 16 Feb 7 2018 libmpfr.so.6 -> libmpfr.so.6.0.1
-rw-r--r-- 1 root root 512K Feb 7 2018 libmpfr.so.6.0.1
lrwxrwxrwx 1 root root 15 Mar 10 2020 libmpx.so.2 -> libmpx.so.2.0.1
-rw-r--r-- 1 root root 19K Mar 10 2020 libmpx.so.2.0.1
lrwxrwxrwx 1 root root 23 Mar 10 2020 libmpxwrappers.so.2 -> libmpxwrappers.so.2.0.1
-rw-r--r-- 1 root root 15K Mar 10 2020 libmpxwrappers.so.2.0.1
-rw-r--r-- 1 root root 337K May 3 10:19 libmvec.a
lrwxrwxrwx 1 root root 34 May 3 10:19 libmvec.so -> /lib/x86_64-linux-gnu/libmvec.so.1
-rw-r--r-- 1 root root 5.9K May 3 10:19 libmvec_nonshared.a
lrwxrwxrwx 1 root root 12 Oct 19 2021 libnccl.so -> libnccl.so.2
lrwxrwxrwx 1 root root 17 Oct 19 2021 libnccl.so.2 -> libnccl.so.2.11.4
-rw-r--r-- 1 root root 220M Oct 19 2021 libnccl.so.2.11.4
-rw-r--r-- 1 root root 238M Oct 19 2021 libnccl_static.a
lrwxrwxrwx 1 root root 16 Jun 14 2021 libnettle.so.6 -> libnettle.so.6.5
-rw-r--r-- 1 root root 215K Jun 14 2021 libnettle.so.6.5
lrwxrwxrwx 1 root root 21 Apr 10 2018 libnghttp2.so.14 -> libnghttp2.so.14.15.2
-rw-r--r-- 1 root root 150K Apr 10 2018 libnghttp2.so.14.15.2
lrwxrwxrwx 1 root root 16 Nov 12 2017 libnpth.so.0 -> libnpth.so.0.1.1
-rw-r--r-- 1 root root 15K Nov 12 2017 libnpth.so.0.1.1
-rw-r--r-- 1 root root 201K May 3 10:19 libnsl.a
lrwxrwxrwx 1 root root 33 May 3 10:19 libnsl.so -> /lib/x86_64-linux-gnu/libnsl.so.1
lrwxrwxrwx 1 root root 40 May 3 10:19 libnss_compat.so -> /lib/x86_64-linux-gnu/libnss_compat.so.2
lrwxrwxrwx 1 root root 37 May 3 10:19 libnss_dns.so -> /lib/x86_64-linux-gnu/libnss_dns.so.2
lrwxrwxrwx 1 root root 39 May 3 10:19 libnss_files.so -> /lib/x86_64-linux-gnu/libnss_files.so.2
lrwxrwxrwx 1 root root 40 May 3 10:19 libnss_hesiod.so -> /lib/x86_64-linux-gnu/libnss_hesiod.so.2
lrwxrwxrwx 1 root root 37 May 3 10:19 libnss_nis.so -> /lib/x86_64-linux-gnu/libnss_nis.so.2
lrwxrwxrwx 1 root root 41 May 3 10:19 libnss_nisplus.so -> /lib/x86_64-linux-gnu/libnss_nisplus.so.2
-rw-r--r-- 1 root root 71K Jun 20 2018 libnuma.a
lrwxrwxrwx 1 root root 16 Jun 20 2018 libnuma.so -> libnuma.so.1.0.0
lrwxrwxrwx 1 root root 16 Jun 20 2018 libnuma.so.1 -> libnuma.so.1.0.0
-rw-r--r-- 1 root root 43K Jun 20 2018 libnuma.so.1.0.0
-rw-r--r-- 1 root root 1.7M Oct 20 2021 libopcodes-2.30-system.so
lrwxrwxrwx 1 root root 19 Jan 4 2021 libp11-kit.so.0 -> libp11-kit.so.0.3.0
-rw-r--r-- 1 root root 1.2M Jan 4 2021 libp11-kit.so.0.3.0
lrwxrwxrwx 1 root root 15 May 23 2018 libpanel.so.5 -> libpanel.so.5.9
-rw-r--r-- 1 root root 14K May 23 2018 libpanel.so.5.9
lrwxrwxrwx 1 root root 16 May 23 2018 libpanelw.so.5 -> libpanelw.so.5.9
-rw-r--r-- 1 root root 14K May 23 2018 libpanelw.so.5.9
lrwxrwxrwx 1 root root 22 May 17 07:42 libpcreposix.so.3 -> libpcreposix.so.3.13.3
-rw-r--r-- 1 root root 9.9K May 17 07:42 libpcreposix.so.3.13.3
lrwxrwxrwx 1 root root 17 Oct 19 2020 libperl.so.5.26 -> libperl.so.5.26.1
-rw-r--r-- 1 root root 2.0M Oct 19 2020 libperl.so.5.26.1
lrwxrwxrwx 1 root root 15 Mar 4 2018 libpsl.so.5 -> libpsl.so.5.2.0
-rw-r--r-- 1 root root 54K Mar 4 2018 libpsl.so.5.2.0
-rw-r--r-- 1 root root 6.0M May 3 10:19 libpthread.a
-rw-r--r-- 1 root root 252 May 3 10:19 libpthread.so
-rw-r--r-- 1 root root 29K May 3 10:19 libpthread_nonshared.a
lrwxrwxrwx 1 root root 20 Mar 10 2020 libquadmath.so.0 -> libquadmath.so.0.0.0
-rw-r--r-- 1 root root 256K Mar 10 2020 libquadmath.so.0.0.0
-rw-r--r-- 1 root root 136K May 3 10:19 libresolv.a
lrwxrwxrwx 1 root root 36 May 3 10:19 libresolv.so -> /lib/x86_64-linux-gnu/libresolv.so.2
lrwxrwxrwx 1 root root 18 Dec 15 2017 libroken.so.18 -> libroken.so.18.1.0
-rw-r--r-- 1 root root 87K Dec 15 2017 libroken.so.18.1.0
-rw-r--r-- 1 root root 52K May 3 10:19 librpcsvc.a
-rw-r--r-- 1 root root 76K May 3 10:19 librt.a
lrwxrwxrwx 1 root root 32 May 3 10:19 librt.so -> /lib/x86_64-linux-gnu/librt.so.1
-rw-r--r-- 1 root root 111K Apr 27 2016 librtmp.so.1
lrwxrwxrwx 1 root root 18 Feb 15 2022 libsasl2.so.2 -> libsasl2.so.2.0.25
-rw-r--r-- 1 root root 107K Feb 15 2022 libsasl2.so.2.0.25
-rw-r--r-- 1 root root 251K Mar 1 2018 libsemanage.so.1
lrwxrwxrwx 1 root root 19 Feb 5 2018 libsigsegv.so.2 -> libsigsegv.so.2.0.5
-rw-r--r-- 1 root root 15K Feb 5 2018 libsigsegv.so.2.0.5
lrwxrwxrwx 1 root root 19 Sep 14 17:02 libsqlite3.so.0 -> libsqlite3.so.0.8.6
-rw-r--r-- 1 root root 1.1M Sep 14 17:02 libsqlite3.so.0.8.6
-rw-r--r-- 1 root root 564K Jul 4 11:25 libssl.so.1.1
lrwxrwxrwx 1 root root 19 Mar 10 2020 libstdc++.so.6 -> libstdc++.so.6.0.25
-rw-r--r-- 1 root root 1.6M Mar 10 2020 libstdc++.so.6.0.25
lrwxrwxrwx 1 root root 17 Jan 21 2018 libtasn1.so.6 -> libtasn1.so.6.5.5
-rw-r--r-- 1 root root 74K Jan 21 2018 libtasn1.so.6.5.5
lrwxrwxrwx 1 root root 39 May 3 10:19 libthread_db.so -> /lib/x86_64-linux-gnu/libthread_db.so.1
lrwxrwxrwx 1 root root 13 May 23 2018 libtic.so.5 -> libtic.so.5.9
-rw-r--r-- 1 root root 62K May 23 2018 libtic.so.5.9
lrwxrwxrwx 1 root root 16 Mar 10 2020 libtsan.so.0 -> libtsan.so.0.0.0
-rw-r--r-- 1 root root 942K Mar 10 2020 libtsan.so.0.0.0
-rw-r--r-- 1 root root 3.6K Mar 10 2020 libtsan_preinit.o
lrwxrwxrwx 1 root root 17 Dec 4 2019 libubsan.so.0 -> libubsan.so.0.0.0
-rw-r--r-- 1 root root 322K Dec 4 2019 libubsan.so.0.0.0
lrwxrwxrwx 1 root root 21 Mar 21 2019 libunistring.so.2 -> libunistring.so.2.1.0
-rw-r--r-- 1 root root 1.5M Mar 21 2019 libunistring.so.2.1.0
lrwxrwxrwx 1 root root 19 May 1 2020 liburcu-bp.so.6 -> liburcu-bp.so.6.0.0
-rw-r--r-- 1 root root 31K May 1 2020 liburcu-bp.so.6.0.0
lrwxrwxrwx 1 root root 20 May 1 2020 liburcu-cds.so.6 -> liburcu-cds.so.6.0.0
root root 38K May 1 2020 liburcu-cds.so.6.0.0 lrwxrwxrwx 1 root root 23 May 1 2020 liburcu-common.so.6 -> liburcu-common.so.6.0.0 -rw-r--r-- 1 root root 18K May 1 2020 liburcu-common.so.6.0.0 lrwxrwxrwx 1 root root 19 May 1 2020 liburcu-mb.so.6 -> liburcu-mb.so.6.0.0 -rw-r--r-- 1 root root 27K May 1 2020 liburcu-mb.so.6.0.0 lrwxrwxrwx 1 root root 21 May 1 2020 liburcu-qsbr.so.6 -> liburcu-qsbr.so.6.0.0 -rw-r--r-- 1 root root 27K May 1 2020 liburcu-qsbr.so.6.0.0 lrwxrwxrwx 1 root root 23 May 1 2020 liburcu-signal.so.6 -> liburcu-signal.so.6.0.0 -rw-r--r-- 1 root root 31K May 1 2020 liburcu-signal.so.6.0.0 lrwxrwxrwx 1 root root 16 May 1 2020 liburcu.so.6 -> liburcu.so.6.0.0 -rw-r--r-- 1 root root 31K May 1 2020 liburcu.so.6.0.0 -rw-r--r-- 1 root root 15K May 3 10:19 libutil.a lrwxrwxrwx 1 root root 34 May 3 10:19 libutil.so -> /lib/x86_64-linux-gnu/libutil.so.1 lrwxrwxrwx 1 root root 16 Dec 15 2017 libwind.so.0 -> libwind.so.0.0.0 -rw-r--r-- 1 root root 162K Dec 15 2017 libwind.so.0.0.0 lrwxrwxrwx 1 root root 16 Mar 3 2021 libzstd.so.1 -> libzstd.so.1.3.3 -rw-r--r-- 1 root root 490K Mar 3 2021 libzstd.so.1.3.3 drwxr-xr-x 1 root root 4.0K May 20 08:07 perl drwxr-xr-x 16 root root 4.0K Apr 28 00:03 perl-base -rw-r--r-- 1 root root 1.8K May 3 10:19 rcrt1.o drwxr-xr-x 2 root root 4.0K May 20 07:57 sasl2 [Pipeline] echo Packing build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so into dgl-cugraph-linux [Pipeline] stash Stashed 2 file(s) Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration... [WS-CLEANUP] done [Pipeline] } $ docker stop --time=1 4e33dd6e703ed0dd95f590a393973aab145ae4e5b4395a56a04d85836079138d $ docker rm -f 4e33dd6e703ed0dd95f590a393973aab145ae4e5b4395a56a04d85836079138d [Pipeline] // withDockerContainer [Pipeline] } [Pipeline] // withEnv [Pipeline] } [Pipeline] // node [Pipeline] } [Pipeline] // stage [Pipeline] } [Pipeline] // parallel [Pipeline] } [Pipeline] // stage [Pipeline] stage [Pipeline] { (Test) [Pipeline] parallel [Pipeline] { (Branch: C++ CPU) [Pipeline] { (Branch: C++ GPU) [Pipeline] { (Branch: C++ CPU (Win64)) [Pipeline] { (Branch: Tensorflow CPU) [Pipeline] { (Branch: Tensorflow GPU) [Pipeline] { (Branch: Torch CPU) [Pipeline] { (Branch: Torch CPU (Win64)) [Pipeline] { (Branch: Torch GPU) [Pipeline] { (Branch: Distributed) [Pipeline] { (Branch: PyTorch Cugraph GPU) [Pipeline] { (Branch: MXNet CPU) [Pipeline] { (Branch: MXNet GPU) [Pipeline] { (Branch: DGL-Go) [Pipeline] stage [Pipeline] { (C++ CPU) [Pipeline] stage [Pipeline] { (C++ GPU) [Pipeline] stage [Pipeline] { (C++ CPU (Win64)) [Pipeline] stage [Pipeline] { (Tensorflow CPU) [Pipeline] stage [Pipeline] { (Tensorflow GPU) [Pipeline] stage [Pipeline] { (Torch CPU) [Pipeline] stage [Pipeline] { (Torch CPU (Win64)) [Pipeline] stage [Pipeline] { (Torch GPU) [Pipeline] stage [Pipeline] { (Distributed) [Pipeline] stage [Pipeline] { (PyTorch Cugraph GPU) [Pipeline] stage [Pipeline] { (MXNet CPU) [Pipeline] stage [Pipeline] { (MXNet GPU) [Pipeline] stage [Pipeline] { (DGL-Go) [Pipeline] node [Pipeline] node [Pipeline] node [Pipeline] node [Pipeline] node [Pipeline] node [Pipeline] node [Pipeline] node [Pipeline] node [Pipeline] node [Pipeline] node [Pipeline] node [Pipeline] node Still waiting to schedule task Waiting for next available executor on ‘windows’ Still waiting to schedule task Waiting for next available executor on ‘windows’ Still waiting to 
schedule task Waiting for next available executor on ‘dgl-manual-large-cpu’ Still waiting to schedule task Waiting for next available executor on ‘dglci-manual-gpu-worker’ Still waiting to schedule task Waiting for next available executor on ‘dgl-manual-large-cpu’ Still waiting to schedule task Waiting for next available executor on ‘dglci-manual-gpu-worker’ Still waiting to schedule task Waiting for next available executor on ‘dgl-manual-large-cpu’ Still waiting to schedule task Waiting for next available executor on ‘dglci-manual-gpu-worker’ Still waiting to schedule task Waiting for next available executor on ‘dgl-manual-large-cpu’ Still waiting to schedule task Waiting for next available executor on ‘dglci-manual-gpu-worker’ Still waiting to schedule task Waiting for next available executor on ‘dgl-manual-large-cpu’ Still waiting to schedule task Waiting for next available executor on ‘dglci-manual-gpu-worker’ Still waiting to schedule task Waiting for next available executor on ‘dgl-manual-large-cpu’ Running on dglci-windows in C:\Jenkins\workspace\dgl_PR-4648 [Pipeline] { [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags Cloning repository https://github.com/dmlc/dgl.git > git init C:\Jenkins\workspace\dgl_PR-4648 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.20.0.windows.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 Cleaning workspace Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. 
Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace [Pipeline] withEnv [Pipeline] { > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse "HEAD^{commit}" # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Fetching changes from the remote Git repository Cleaning workspace Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace > git rev-parse --resolve-git-dir C:\Jenkins\workspace\dgl_PR-4648\.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.20.0.windows.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse "HEAD^{commit}" # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 [Pipeline] bat administrator@WIN-O7QS55HVSDB C:\Jenkins\workspace\dgl_PR-4648>git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered 
for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/METIS'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/dlpack'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/nanoflann'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/nccl'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/phmap'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/googletest'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/xbyak'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/libxsmm'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/thrust'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'... 
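Every third-party dependency above is pinned: the "Submodule path ...: checked out ..." lines that follow record the exact commit used for each submodule. To inspect the same pins in a local clone, something like the sketch below works; the helper script is illustrative only and is not part of the CI scripts.

    # Illustrative only -- not part of the CI scripts. Lists each submodule's
    # pinned commit, mirroring the "Submodule path ...: checked out ..." lines.
    import subprocess

    status = subprocess.run(
        ["git", "submodule", "status", "--recursive"],
        capture_output=True, text=True, check=True,
    ).stdout
    for line in status.splitlines():
        sha, path = line.split()[:2]
        print(path, "->", sha.lstrip("+-U"))  # strip git's state prefix, if any
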
Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'... Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'... Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'... 
Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'... Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] unstash [Pipeline] echo Unpacked build\dgl.dll, build\runUnitTests.exe, build\tensoradapter\pytorch\*.dll from dgl-cpu-win64 [Pipeline] bat administrator@WIN-O7QS55HVSDB C:\Jenkins\workspace\dgl_PR-4648>CALL tests\scripts\task_cpp_unit_test.bat Running main() from C:\Jenkins\workspace\dgl_PR-4648\third_party\googletest\googletest\src\gtest_main.cc [==========] Running 89 tests from 21 test suites. [----------] Global test environment set-up. [----------] 1 test from GraphTest [ RUN ] GraphTest.TestNumVertices [ OK ] GraphTest.TestNumVertices (0 ms) [----------] 1 test from GraphTest (0 ms total) [----------] 3 tests from MessageQueueTest [ RUN ] MessageQueueTest.AddRemove [05:02:13] C:\Jenkins\workspace\dgl_PR-4648\src\rpc\network\msg_queue.cc:27: Message is larger than the queue. 
[ OK ] MessageQueueTest.AddRemove (0 ms) [ RUN ] MessageQueueTest.EmptyAndNoMoreAdd [ OK ] MessageQueueTest.EmptyAndNoMoreAdd (0 ms) [ RUN ] MessageQueueTest.MultiThread [ OK ] MessageQueueTest.MultiThread (9 ms) [----------] 3 tests from MessageQueueTest (9 ms total) [----------] 1 test from SocketCommunicatorTest [ RUN ] SocketCommunicatorTest.SendAndRecv [ OK ] SocketCommunicatorTest.SendAndRecv (6006 ms) [----------] 1 test from SocketCommunicatorTest (6006 ms total) [----------] 3 tests from SplitStringTest [ RUN ] SplitStringTest.SplitStringUsingCompoundDelim [ OK ] SplitStringTest.SplitStringUsingCompoundDelim (0 ms) [ RUN ] SplitStringTest.testSplitStringUsingSingleDelim [ OK ] SplitStringTest.testSplitStringUsingSingleDelim (0 ms) [ RUN ] SplitStringTest.testSplitingNoDelimString [ OK ] SplitStringTest.testSplitingNoDelimString (0 ms) [----------] 3 tests from SplitStringTest (0 ms total) [----------] 1 test from StringPrintf [ RUN ] StringPrintf.normal [ OK ] StringPrintf.normal (0 ms) [----------] 1 test from StringPrintf (0 ms total) [----------] 13 tests from ArrayTest [ RUN ] ArrayTest.TestCreate [ OK ] ArrayTest.TestCreate (0 ms) [ RUN ] ArrayTest.TestRange [ OK ] ArrayTest.TestRange (0 ms) [ RUN ] ArrayTest.TestFull [ OK ] ArrayTest.TestFull (0 ms) [ RUN ] ArrayTest.TestClone [ OK ] ArrayTest.TestClone (0 ms) [ RUN ] ArrayTest.TestAsNumBits [ OK ] ArrayTest.TestAsNumBits (0 ms) [ RUN ] ArrayTest.Arith [ OK ] ArrayTest.Arith (1 ms) [ RUN ] ArrayTest.HStack [ OK ] ArrayTest.HStack (0 ms) [ RUN ] ArrayTest.TestIndexSelect [ OK ] ArrayTest.TestIndexSelect (0 ms) [ RUN ] ArrayTest.TestRelabel_ [ OK ] ArrayTest.TestRelabel_ (0 ms) [ RUN ] ArrayTest.CumSum [ OK ] ArrayTest.CumSum (0 ms) [ RUN ] ArrayTest.Scatter_ [ OK ] ArrayTest.Scatter_ (0 ms) [ RUN ] ArrayTest.NonZero [ OK ] ArrayTest.NonZero (0 ms) [ RUN ] ArrayTest.Sort [ OK ] ArrayTest.Sort (0 ms) [----------] 13 tests from ArrayTest (1 ms total) [----------] 2 tests from MatrixTest [ RUN ] MatrixTest.TestToSimpleCsr [ OK ] MatrixTest.TestToSimpleCsr (1 ms) [ RUN ] MatrixTest.TestToSimpleCoo [ OK ] MatrixTest.TestToSimpleCoo (0 ms) [----------] 2 tests from MatrixTest (1 ms total) [----------] 2 tests from DisjointUnionTest [ RUN ] DisjointUnionTest.TestDisjointUnionPartitionCoo [ OK ] DisjointUnionTest.TestDisjointUnionPartitionCoo (0 ms) [ RUN ] DisjointUnionTest.TestDisjointUnionPartitionCsr [ OK ] DisjointUnionTest.TestDisjointUnionPartitionCsr (1 ms) [----------] 2 tests from DisjointUnionTest (1 ms total) [----------] 2 tests from SliceContiguousChunk [ RUN ] SliceContiguousChunk.TestSliceContiguousChunkCoo [ OK ] SliceContiguousChunk.TestSliceContiguousChunkCoo (0 ms) [ RUN ] SliceContiguousChunk.TestSliceContiguousChunkCsr [ OK ] SliceContiguousChunk.TestSliceContiguousChunkCsr (0 ms) [----------] 2 tests from SliceContiguousChunk (0 ms total) [----------] 2 tests from MatrixUnionTest [ RUN ] MatrixUnionTest.TestMatrixUnionCsr [ OK ] MatrixUnionTest.TestMatrixUnionCsr (0 ms) [ RUN ] MatrixUnionTest.TestMatrixUnionCoo [ OK ] MatrixUnionTest.TestMatrixUnionCoo (0 ms) [----------] 2 tests from MatrixUnionTest (0 ms total) [----------] 1 test from LineGraphTest [ RUN ] LineGraphTest.LineGraphCOO [ OK ] LineGraphTest.LineGraphCOO (0 ms) [----------] 1 test from LineGraphTest (0 ms total) [----------] 3 tests from CsrmmTest [ RUN ] CsrmmTest.TestCsrmm [ OK ] CsrmmTest.TestCsrmm (1 ms) [ RUN ] CsrmmTest.TestCsrsum [ OK ] CsrmmTest.TestCsrsum (1 ms) [ RUN ] CsrmmTest.TestCsrmask [ OK ] CsrmmTest.TestCsrmask (2 ms) 
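The runUnitTests.exe binary unstashed above is a googletest suite, so any single suite from this run can be re-executed in isolation with googletest's standard --gtest_filter flag. A minimal wrapper sketch; the binary path comes from the log, while the wrapper itself is an assumption rather than CI code:

    # Re-run only the CsrmmTest suite from the stashed test binary.
    # --gtest_filter is standard googletest; this wrapper is illustrative.
    import subprocess

    result = subprocess.run(
        [r"build\runUnitTests.exe", "--gtest_filter=CsrmmTest.*"],
        capture_output=True, text=True,
    )
    print(result.stdout)
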
[----------] 3 tests from CsrmmTest (4 ms total) [----------] 2 tests from PartitionTest [ RUN ] PartitionTest.TestRemainderPartition [ OK ] PartitionTest.TestRemainderPartition (0 ms) [ RUN ] PartitionTest.TestRangePartition [ OK ] PartitionTest.TestRangePartition (0 ms) [----------] 2 tests from PartitionTest (0 ms total) [----------] 11 tests from RowwiseTest [ RUN ] RowwiseTest.TestCSRSampling [ OK ] RowwiseTest.TestCSRSampling (22 ms) [ RUN ] RowwiseTest.TestCSRSamplingUniform [ OK ] RowwiseTest.TestCSRSamplingUniform (2 ms) [ RUN ] RowwiseTest.TestCSRPerEtypeSampling [ OK ] RowwiseTest.TestCSRPerEtypeSampling (11 ms) [ RUN ] RowwiseTest.TestCSRPerEtypeSamplingUniform [ OK ] RowwiseTest.TestCSRPerEtypeSamplingUniform (6 ms) [ RUN ] RowwiseTest.TestCOOSampling [ OK ] RowwiseTest.TestCOOSampling (1855 ms) [ RUN ] RowwiseTest.TestCOOSamplingUniform [ OK ] RowwiseTest.TestCOOSamplingUniform (213 ms) [ RUN ] RowwiseTest.TestCOOerEtypeSampling [ OK ] RowwiseTest.TestCOOerEtypeSampling (4322 ms) [ RUN ] RowwiseTest.TestCOOPerEtypeSamplingUniform [ OK ] RowwiseTest.TestCOOPerEtypeSamplingUniform (2814 ms) [ RUN ] RowwiseTest.TestCSRTopk [ OK ] RowwiseTest.TestCSRTopk (2 ms) [ RUN ] RowwiseTest.TestCOOTopk [ OK ] RowwiseTest.TestCOOTopk (21 ms) [ RUN ] RowwiseTest.TestCSRSamplingBiased [ OK ] RowwiseTest.TestCSRSamplingBiased (2 ms) [----------] 11 tests from RowwiseTest (9270 ms total) [----------] 3 tests from SampleUtilsTest [ RUN ] SampleUtilsTest.TestWithReplacement [ OK ] SampleUtilsTest.TestWithReplacement (2230 ms) [ RUN ] SampleUtilsTest.TestWithoutReplacementOrder [ OK ] SampleUtilsTest.TestWithoutReplacementOrder (0 ms) [ RUN ] SampleUtilsTest.TestWithoutReplacementUnique [ OK ] SampleUtilsTest.TestWithoutReplacementUnique (6263 ms) [----------] 3 tests from SampleUtilsTest (8493 ms total) [----------] 3 tests from RandomTest [ RUN ] RandomTest.TestChoice [ OK ] RandomTest.TestChoice (1 ms) [ RUN ] RandomTest.TestUniformChoice [ OK ] RandomTest.TestUniformChoice (0 ms) [ RUN ] RandomTest.TestBiasedChoice [ OK ] RandomTest.TestBiasedChoice (34 ms) [----------] 3 tests from RandomTest (36 ms total) [----------] 4 tests from Serialize [ RUN ] Serialize.UnitGraph_COO [ OK ] Serialize.UnitGraph_COO (0 ms) [ RUN ] Serialize.UnitGraph_CSR [ OK ] Serialize.UnitGraph_CSR (0 ms) [ RUN ] Serialize.ImmutableGraph [ OK ] Serialize.ImmutableGraph (1 ms) [ RUN ] Serialize.HeteroGraph [ OK ] Serialize.HeteroGraph (2 ms) [----------] 4 tests from Serialize (3 ms total) [----------] 3 tests from SmartPtrTest/0, where TypeParam = class std::shared_ptr [ RUN ] SmartPtrTest/0.Obj_Test [ OK ] SmartPtrTest/0.Obj_Test (0 ms) [ RUN ] SmartPtrTest/0.Vector_Test1 [ OK ] SmartPtrTest/0.Vector_Test1 (0 ms) [ RUN ] SmartPtrTest/0.Vector_Test2 [ OK ] SmartPtrTest/0.Vector_Test2 (0 ms) [----------] 3 tests from SmartPtrTest/0 (0 ms total) [----------] 3 tests from SmartPtrTest/1, where TypeParam = class std::unique_ptr > [ RUN ] SmartPtrTest/1.Obj_Test [ OK ] SmartPtrTest/1.Obj_Test (0 ms) [ RUN ] SmartPtrTest/1.Vector_Test1 [ OK ] SmartPtrTest/1.Vector_Test1 (0 ms) [ RUN ] SmartPtrTest/1.Vector_Test2 [ OK ] SmartPtrTest/1.Vector_Test2 (0 ms) [----------] 3 tests from SmartPtrTest/1 (0 ms total) [----------] 19 tests from SpmatTest [ RUN ] SpmatTest.COOToCSR [ OK ] SpmatTest.COOToCSR (0 ms) [ RUN ] SpmatTest.TestCOOHasDuplicate [ OK ] SpmatTest.TestCOOHasDuplicate (0 ms) [ RUN ] SpmatTest.COOSort [ OK ] SpmatTest.COOSort (0 ms) [ RUN ] SpmatTest.TestCOOReorder [ OK ] SpmatTest.TestCOOReorder (2 ms) [ RUN ] 
SpmatTest.COOGetData [ OK ] SpmatTest.COOGetData (7 ms) [ RUN ] SpmatTest.COOGetDataAndIndices [ OK ] SpmatTest.COOGetDataAndIndices (0 ms) [ RUN ] SpmatTest.TestCSRIsNonZero [ OK ] SpmatTest.TestCSRIsNonZero (0 ms) [ RUN ] SpmatTest.TestCSRGetRowNNZ [ OK ] SpmatTest.TestCSRGetRowNNZ (0 ms) [ RUN ] SpmatTest.TestCSRGetRowColumnIndices [ OK ] SpmatTest.TestCSRGetRowColumnIndices (0 ms) [ RUN ] SpmatTest.TestCSRGetRowData [ OK ] SpmatTest.TestCSRGetRowData (0 ms) [ RUN ] SpmatTest.CSRGetData [ OK ] SpmatTest.CSRGetData (1 ms) [ RUN ] SpmatTest.CSRGetDataAndIndices [ OK ] SpmatTest.CSRGetDataAndIndices (0 ms) [ RUN ] SpmatTest.CSRTranspose [ OK ] SpmatTest.CSRTranspose (0 ms) [ RUN ] SpmatTest.CSRToCOO [ OK ] SpmatTest.CSRToCOO (1 ms) [ RUN ] SpmatTest.TestCSRSliceRows [ OK ] SpmatTest.TestCSRSliceRows (30 ms) [ RUN ] SpmatTest.CSRSliceMatrix [ OK ] SpmatTest.CSRSliceMatrix (13 ms) [ RUN ] SpmatTest.CSRHasDuplicate [ OK ] SpmatTest.CSRHasDuplicate (0 ms) [ RUN ] SpmatTest.CSRSort [ OK ] SpmatTest.CSRSort (1 ms) [ RUN ] SpmatTest.TestCSRReorder [ OK ] SpmatTest.TestCSRReorder (0 ms) [----------] 19 tests from SpmatTest (55 ms total) [----------] 7 tests from UniGraphTest [ RUN ] UniGraphTest.TestUnitGraph_CopyTo [ OK ] UniGraphTest.TestUnitGraph_CopyTo (0 ms) [ RUN ] UniGraphTest.TestUnitGraph_InOutDegrees [ OK ] UniGraphTest.TestUnitGraph_InOutDegrees (4 ms) [ RUN ] UniGraphTest.TestUnitGraph_Create [ OK ] UniGraphTest.TestUnitGraph_Create (2 ms) [ RUN ] UniGraphTest.TestUnitGraph_GetInCSR [ OK ] UniGraphTest.TestUnitGraph_GetInCSR (4 ms) [ RUN ] UniGraphTest.TestUnitGraph_GetOutCSR [ OK ] UniGraphTest.TestUnitGraph_GetOutCSR (5 ms) [ RUN ] UniGraphTest.TestUnitGraph_GetCOO [ OK ] UniGraphTest.TestUnitGraph_GetCOO (1 ms) [ RUN ] UniGraphTest.TestUnitGraph_Reserve [ OK ] UniGraphTest.TestUnitGraph_Reserve (5 ms) [----------] 7 tests from UniGraphTest (21 ms total) [----------] Global test environment tear-down [==========] 89 tests from 21 test suites ran. (23901 ms total) [ PASSED ] 89 tests. Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration... [WS-CLEANUP] done [Pipeline] } [Pipeline] // withEnv [Pipeline] } Running on dglci-windows in C:\Jenkins\workspace\dgl_PR-4648 [Pipeline] // node [Pipeline] { [Pipeline] } [Pipeline] // stage [Pipeline] } [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags Cloning repository https://github.com/dmlc/dgl.git > git init C:\Jenkins\workspace\dgl_PR-4648 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.20.0.windows.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 Cleaning workspace > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. 
Skipping the resetting > git clean -fdx # timeout=10 Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse "HEAD^{commit}" # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 Commit message: "fix for pytorch < 1.12" Cleaning workspace [Pipeline] withEnv [Pipeline] { [Pipeline] stage [Pipeline] { (Torch CPU (Win64) Unit test) [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Fetching changes from the remote Git repository Cleaning workspace > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace > git rev-parse --resolve-git-dir C:\Jenkins\workspace\dgl_PR-4648\.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.20.0.windows.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --force --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse "HEAD^{commit}" # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git 
clean -fdx # timeout=10 [Pipeline] bat administrator@WIN-O7QS55HVSDB C:\Jenkins\workspace\dgl_PR-4648>git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/METIS'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/dlpack'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/googletest'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/phmap'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/nanoflann'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/nccl'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/xbyak'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/libxsmm'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/thrust'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'... 
Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'... Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'... Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'... 
Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'... Cloning into 'C:/Jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'... Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] unstash [Pipeline] echo Unpacked build\dgl.dll, build\runUnitTests.exe, build\tensoradapter\pytorch\*.dll from dgl-cpu-win64 [Pipeline] timeout Timeout set to expire in 30 min [Pipeline] { [Pipeline] bat administrator@WIN-O7QS55HVSDB C:\Jenkins\workspace\dgl_PR-4648>CALL tests\scripts\task_unit_test.bat pytorch Requirement already satisfied: pytest in c:\program files\python36\lib\site-packages (7.0.1) Requirement already satisfied: psutil in c:\users\administrator\envs\jenkins-dgl-pr-4648-2\lib\site-packages (5.9.2) Requirement already satisfied: pandas in c:\program files\python36\lib\site-packages (1.1.5) Requirement already satisfied: pyyaml in c:\program files\python36\lib\site-packages (6.0) Requirement already satisfied: pydantic in c:\program files\python36\lib\site-packages (1.9.0) Collecting rdflib Using cached rdflib-5.0.0-py3-none-any.whl (231 kB) Requirement already satisfied: colorama in c:\program files\python36\lib\site-packages (from pytest) (0.4.3) Requirement already satisfied: tomli>=1.0.0 in c:\program files\python36\lib\site-packages (from pytest) (1.2.3) Requirement already satisfied: py>=1.8.2 in c:\program files\python36\lib\site-packages (from pytest) (1.9.0) Requirement already satisfied: iniconfig in c:\program files\python36\lib\site-packages (from pytest) (1.0.1) Requirement already satisfied: pluggy<2.0,>=0.12 in c:\program files\python36\lib\site-packages (from pytest) (0.13.1) Requirement already satisfied: packaging in c:\program files\python36\lib\site-packages (from pytest) (20.4) Requirement already satisfied: importlib-metadata>=0.12 in c:\program files\python36\lib\site-packages (from pytest) (1.7.0) Requirement already satisfied: atomicwrites>=1.0 in c:\program files\python36\lib\site-packages (from pytest) (1.4.0) Requirement already satisfied: attrs>=19.2.0 in c:\program files\python36\lib\site-packages (from pytest) (19.3.0) Requirement already satisfied: numpy>=1.15.4 in c:\program files\python36\lib\site-packages (from pandas) (1.19.5) Requirement already 
satisfied: pytz>=2017.2 in c:\program files\python36\lib\site-packages (from pandas) (2021.1) Requirement already satisfied: python-dateutil>=2.7.3 in c:\program files\python36\lib\site-packages (from pandas) (2.8.1) Requirement already satisfied: dataclasses>=0.6 in c:\program files\python36\lib\site-packages (from pydantic) (0.8) Requirement already satisfied: typing-extensions>=3.7.4.3 in c:\program files\python36\lib\site-packages (from pydantic) (4.0.1) Collecting isodate Using cached isodate-0.6.1-py2.py3-none-any.whl (41 kB) Requirement already satisfied: pyparsing in c:\program files\python36\lib\site-packages (from rdflib) (2.4.7) Requirement already satisfied: six in c:\program files\python36\lib\site-packages (from rdflib) (1.15.0) Requirement already satisfied: zipp>=0.5 in c:\program files\python36\lib\site-packages (from importlib-metadata>=0.12->pytest) (3.1.0) Installing collected packages: isodate, rdflib Successfully installed isodate-0.6.1 rdflib-5.0.0 ============================= test session starts ============================= platform win32 -- Python 3.6.7, pytest-7.0.1, pluggy-0.13.1 -- C:\Users\Administrator\Envs\jenkins-dgl-PR-4648-2\Scripts\python.exe cachedir: .pytest_cache rootdir: C:\Jenkins\workspace\dgl_PR-4648 collecting ... collected 3978 items tests/pytorch/test_dataloader.py::test_graph_dataloader PASSED [ 0%] tests/pytorch/test_dataloader.py::test_cluster_gcn[0] SKIPPED (Do no...) [ 0%] tests/pytorch/test_dataloader.py::test_cluster_gcn[4] SKIPPED (Do no...) [ 0%] tests/pytorch/test_dataloader.py::test_shadow[0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_shadow[4] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[node-0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[node-4] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[edge-0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[edge-4] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[walk-0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[walk-4] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-cpu-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-cpu-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-uva_cuda_indices-idtype0] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-uva_cuda_indices-idtype1] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-uva_cpu_indices-idtype0] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-uva_cpu_indices-idtype1] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-pure_gpu-idtype0] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-pure_gpu-idtype1] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-cpu-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-cpu-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-uva_cuda_indices-idtype0] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-uva_cuda_indices-idtype1] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-uva_cpu_indices-idtype0] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-uva_cpu_indices-idtype1] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-pure_gpu-idtype0] SKIPPED [ 0%] 
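The uva_cuda_indices, uva_cpu_indices and pure_gpu parametrizations are all SKIPPED in this run because the job executes on a CPU-only Windows worker. A minimal sketch of the usual pytest pattern for such device parametrizations, assuming only pytest and torch (this is not DGL's actual test helper):

    import pytest
    import torch

    # Skip CUDA variants on CPU-only workers.
    requires_cuda = pytest.mark.skipif(
        not torch.cuda.is_available(), reason="Do not support GPU"
    )

    @pytest.mark.parametrize(
        "device", ["cpu", pytest.param("cuda", marks=requires_cuda)]
    )
    def test_device_smoke(device):
        assert torch.zeros(4, device=device).sum().item() == 0.0
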
tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-pure_gpu-idtype1] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-full-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-full-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor2-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor2-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-full-idtype0] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-full-idtype1] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-neighbor-idtype0] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-neighbor-idtype1] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-neighbor2-idtype0] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-neighbor2-idtype1] SKIPPED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-full-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-full-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-neighbor-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-neighbor-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-neighbor2-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-neighbor2-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-full-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-full-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-neighbor-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-neighbor-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-neighbor2-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-neighbor2-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-full-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-full-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor2-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor2-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-full-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-full-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-neighbor-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-neighbor-idtype1] 
SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-neighbor2-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-neighbor2-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-full-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-full-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-neighbor-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-neighbor-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-neighbor2-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-neighbor2-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-full-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-full-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-neighbor-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-neighbor-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-neighbor2-idtype0] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-neighbor2-idtype1] SKIPPED [ 1%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-full-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-full-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-neighbor-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-neighbor-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-full-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-full-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler0-full-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler0-full-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler0-neighbor-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler0-neighbor-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler1-full-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler1-full-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler1-neighbor-idtype0] SKIPPED [ 2%] 
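The neg_sampler0..neg_sampler2 ids parametrize different negative samplers for edge prediction. The log does not show which sampler each id maps to; for reference, the simplest one in DGL is the uniform sampler, sketched here on a made-up random graph:

    import torch
    import dgl

    g = dgl.rand_graph(100, 500)  # random homogeneous graph: 100 nodes, 500 edges
    neg_sampler = dgl.dataloading.negative_sampler.Uniform(5)  # 5 negatives per edge
    # Corrupt the first 10 edges: returns (src, dst) tensors of 50 negative pairs.
    neg_src, neg_dst = neg_sampler(g, torch.arange(10))
    print(neg_src.shape, neg_dst.shape)
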
tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler1-neighbor-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler2-full-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler2-full-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler2-neighbor-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler2-neighbor-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler0-full-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler0-full-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler0-neighbor-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler0-neighbor-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler1-full-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler1-full-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler1-neighbor-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler1-neighbor-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler2-full-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler2-full-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler2-neighbor-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler2-neighbor-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-full-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-full-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-full-idtype0] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-full-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-neighbor-idtype0] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-neighbor-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-full-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-full-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-neighbor-idtype0] SKIPPED [ 3%] 
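The idtype0/idtype1 suffixes seen throughout this run parametrize the graph's integer id dtype: DGL graphs can store node and edge ids as int32 or int64, and every test runs under both. A minimal sketch of switching idtype (which of 0/1 maps to which dtype is not shown in the log):

    import torch
    import dgl

    g32 = dgl.graph(([0, 1, 2], [1, 2, 0]), idtype=torch.int32)
    g64 = g32.long()  # convert ids to int64
    assert g32.idtype == torch.int32 and g64.idtype == torch.int64
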
tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-neighbor-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler1-full-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler1-full-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler1-neighbor-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler1-neighbor-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler2-full-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler2-full-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler2-neighbor-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler2-neighbor-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler0-full-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler0-full-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler0-neighbor-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler0-neighbor-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-full-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-full-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-neighbor-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-neighbor-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler2-full-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler2-full-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler2-neighbor-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler2-neighbor-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-None-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-None-True] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-self-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-self-True] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-reverse_id-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-reverse_id-True] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-reverse_types-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-reverse_types-True] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-None-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-None-True] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-self-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-self-True] PASSED [ 3%] 
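test_edge_dataloader_excludes sweeps the edge-exclusion policies applied when sampling neighborhoods around seed edges: None (keep all edges), self (drop the seed edge itself), reverse_id (also drop its reverse edge by id) and reverse_types (drop the reverse edge held in a separate edge type). A minimal sketch against DGL 0.9's as_edge_prediction_sampler; the toy graph and its reverse-id mapping are assumptions:

    import torch
    import dgl

    # Toy graph whose edges are stored as reverse pairs: eid i <-> eid i^1.
    g = dgl.graph(([0, 1, 1, 2], [1, 0, 2, 1]))
    sampler = dgl.dataloading.NeighborSampler([5])
    sampler = dgl.dataloading.as_edge_prediction_sampler(
        sampler,
        exclude="reverse_id",                     # drop each seed edge's reverse
        reverse_eids=torch.tensor([1, 0, 3, 2]),  # eid -> reverse eid
    )
    loader = dgl.dataloading.DataLoader(
        g, torch.arange(g.num_edges()), sampler, batch_size=2
    )
    for input_nodes, pair_graph, blocks in loader:
        print(pair_graph.num_edges())
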
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-reverse_id-False] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-reverse_id-True] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-reverse_types-False] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-reverse_types-True] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-None-False] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-None-True] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-self-False] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-self-True] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-reverse_id-False] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-reverse_id-True] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-reverse_types-False] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-reverse_types-True] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-None-False] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-None-True] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-self-False] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-self-True] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-reverse_id-False] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-reverse_id-True] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-reverse_types-False] PASSED [ 4%]
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-reverse_types-True] PASSED [ 4%]
tests/pytorch/test_dist_optim.py::test_sparse_opt SKIPPED (Do not su...)
[ 4%]
tests/pytorch/test_geometry.py::test_fps PASSED [ 4%]
tests/pytorch/test_geometry.py::test_fps_start_idx PASSED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[False-euclidean-bruteforce-blas] PASSED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[False-euclidean-bruteforce] PASSED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[False-euclidean-kd-tree] PASSED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[False-cosine-bruteforce-blas] PASSED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[False-cosine-bruteforce] PASSED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[False-cosine-kd-tree] PASSED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[True-euclidean-bruteforce-blas] FAILED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[True-euclidean-bruteforce] FAILED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[True-euclidean-kd-tree] FAILED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[True-cosine-bruteforce-blas] FAILED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[True-cosine-bruteforce] FAILED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cpu[True-cosine-kd-tree] FAILED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cuda[False-euclidean-bruteforce-blas] PASSED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cuda[False-euclidean-bruteforce] PASSED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cuda[False-euclidean-bruteforce-sharemem] PASSED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cuda[False-cosine-bruteforce-blas] PASSED [ 4%]
tests/pytorch/test_geometry.py::test_knn_cuda[False-cosine-bruteforce] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_knn_cuda[False-cosine-bruteforce-sharemem] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_knn_cuda[True-euclidean-bruteforce-blas] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_knn_cuda[True-euclidean-bruteforce] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_knn_cuda[True-euclidean-bruteforce-sharemem] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_knn_cuda[True-cosine-bruteforce-blas] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_knn_cuda[True-cosine-bruteforce] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_knn_cuda[True-cosine-bruteforce-sharemem] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g0-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g0-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g1-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g1-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g2-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g2-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g3-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g3-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g4-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g4-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g5-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g5-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g6-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g6-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g0-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g0-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g1-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g1-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g2-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g2-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g3-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g3-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g4-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g4-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g5-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g5-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g6-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g6-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g0-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g0-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g1-idtype0] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g1-idtype1] PASSED [ 5%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g2-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g2-idtype1] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g3-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g3-idtype1] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g4-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g4-idtype1] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g5-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g5-idtype1] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g6-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g6-idtype1] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g0-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g0-idtype1] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g1-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g1-idtype1] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g2-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g2-idtype1] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g3-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g3-idtype1] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g4-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g4-idtype1] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g5-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g5-idtype1] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g6-idtype0] PASSED [ 6%]
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g6-idtype1] PASSED [ 6%]
tests/pytorch/test_ipc.py::test_torch_ipc SKIPPED (Do not support wi...) [ 6%]
tests/pytorch/test_nn.py::test_graph_conv0[1] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv0[2] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g0-idtype0] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g0-idtype1] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g1-idtype0] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g1-idtype1] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g2-idtype0] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g2-idtype1] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g3-idtype0] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g3-idtype1] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g4-idtype0] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g4-idtype1] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g5-idtype0] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g5-idtype1] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g6-idtype0] FAILED [ 6%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g6-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g7-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g7-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g0-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g0-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g1-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g1-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g2-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g2-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g3-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g3-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g4-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g4-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g5-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g5-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g6-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g6-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g7-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g7-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g0-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g0-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g1-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g1-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g2-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g2-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g3-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g3-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g4-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g4-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g5-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g5-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g6-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g6-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g7-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g7-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g0-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g0-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g1-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g1-idtype1] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g2-idtype0] FAILED [ 7%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g2-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g3-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g3-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g4-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g4-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g5-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g5-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g6-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g6-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g7-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g7-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g0-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g0-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g1-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g1-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g2-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g2-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g3-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g3-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g4-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g4-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g5-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g5-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g6-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g6-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g7-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g7-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g0-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g0-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g1-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g1-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g2-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g2-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g3-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g3-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g4-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g4-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g5-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g5-idtype1] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g6-idtype0] FAILED [ 8%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g6-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g7-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g7-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g0-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g0-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g1-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g1-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g2-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g2-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g3-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g3-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g4-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g4-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g5-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g5-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g6-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g6-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g7-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g7-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g0-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g0-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g1-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g1-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g2-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g2-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g3-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g3-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g4-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g4-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g5-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g5-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g6-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g6-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g7-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g7-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g0-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g0-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g1-idtype0] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g1-idtype1] FAILED [ 9%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g2-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g2-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g3-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g3-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g4-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g4-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g5-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g5-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g6-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g6-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g7-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g7-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g0-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g0-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g1-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g1-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g2-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g2-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g3-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g3-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g4-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g4-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g5-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g5-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g6-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g6-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g7-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g7-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g0-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g0-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g1-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g1-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g2-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g2-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g3-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g3-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g4-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g4-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g5-idtype0] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g5-idtype1] FAILED [ 10%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g6-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g6-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g7-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g7-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g0-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g0-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g1-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g1-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g2-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g2-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g3-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g3-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g4-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g4-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g5-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g5-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g6-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g6-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g7-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g7-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g0-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g0-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g1-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g1-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g2-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g2-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g3-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g3-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g4-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g4-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g5-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g5-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g6-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g6-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g7-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g7-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g0-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g0-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g1-idtype0] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g1-idtype1] FAILED [ 11%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g2-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g2-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g3-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g3-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g4-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g4-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g5-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g5-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g6-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g6-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g7-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g7-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g0-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g0-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g1-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g1-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g2-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g2-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g3-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g3-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g4-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g4-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g5-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g5-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g6-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g6-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g7-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g7-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g0-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g0-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g1-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g1-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g2-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g2-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g3-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g3-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g4-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g4-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g5-idtype0] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g5-idtype1] FAILED [ 12%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g6-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g6-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g7-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g7-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g0-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g0-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g1-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g1-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g2-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g2-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g3-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g3-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g4-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g4-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g5-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g5-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g6-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g6-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g7-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g7-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g0-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g0-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g1-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g1-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g2-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g2-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g3-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g3-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g4-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g4-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g5-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g5-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g6-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g6-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g7-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g7-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g0-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g0-idtype1] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g1-idtype0] FAILED [ 13%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g1-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g2-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g2-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g3-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g3-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g4-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g4-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g5-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g5-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g6-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g6-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g7-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g7-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g0-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g0-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g1-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g1-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g2-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g2-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g3-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g3-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g4-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g4-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g5-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g5-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g6-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g6-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g7-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g7-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g0-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g0-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g1-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g1-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g2-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g2-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g3-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g3-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g4-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g4-idtype1] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g5-idtype0] FAILED [ 14%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g5-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g6-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g6-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g7-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g7-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g0-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g0-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g1-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g1-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g2-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g2-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g3-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g3-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g4-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g4-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g5-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g5-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g6-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g6-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g7-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g7-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g0-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g0-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g1-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g1-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g2-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g2-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g3-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g3-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g4-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g4-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g5-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g5-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g6-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g6-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g7-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g7-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g0-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g0-idtype1] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g1-idtype0] FAILED [ 15%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g1-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g2-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g2-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g3-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g3-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g4-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g4-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g5-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g5-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g6-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g6-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g7-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g7-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g0-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g0-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g1-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g1-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g2-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g2-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g3-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g3-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g4-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g4-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g5-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g5-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g6-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g6-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g7-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g7-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g0-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g0-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g1-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g1-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g2-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g2-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g3-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g3-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g4-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g4-idtype1] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g5-idtype0] FAILED [ 16%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g5-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g6-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g6-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g7-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g7-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g0-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g0-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g1-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g1-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g2-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g2-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g3-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g3-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g4-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g4-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g5-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g5-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g6-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g6-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g7-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g7-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g0-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g0-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g1-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g1-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g2-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g2-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g3-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g3-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g4-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g4-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g5-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g5-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g6-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g6-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g7-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g7-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g0-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g0-idtype1] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g1-idtype0] FAILED [ 17%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g1-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g2-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g2-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g3-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g3-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g4-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g4-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g5-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g5-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g6-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g6-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g7-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g7-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g0-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g0-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g1-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g1-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g2-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g2-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g3-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g3-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g4-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g4-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g5-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g5-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g6-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g6-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g7-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g7-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g0-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g0-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g1-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g1-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g2-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g2-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g3-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g3-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g4-idtype0] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g4-idtype1] FAILED [ 18%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g5-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g5-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g6-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g6-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g7-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g7-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g0-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g0-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g1-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g1-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g2-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g2-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g3-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g3-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g4-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g4-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g5-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g5-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g6-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g6-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g7-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g7-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-none-g0-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-none-g0-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-both-g0-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-both-g0-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-right-g0-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-right-g0-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-none-g0-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-none-g0-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-both-g0-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-both-g0-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-right-g0-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-right-g0-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-none-g0-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-none-g0-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-both-g0-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-both-g0-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-right-g0-idtype0] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-right-g0-idtype1] FAILED [ 19%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-none-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-none-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-both-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-both-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-right-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-right-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-none-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-none-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-both-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-both-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-right-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-right-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-none-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-none-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-both-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-both-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-right-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-right-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-none-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-none-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-both-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-both-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-right-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-right-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-none-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-none-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-both-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-both-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-right-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-right-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-none-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-none-g0-idtype1] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-both-g0-idtype0] FAILED [ 20%]
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-both-g0-idtype1] FAILED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-right-g0-idtype0] FAILED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-right-g0-idtype1] FAILED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-none-g0-idtype0] FAILED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-none-g0-idtype1] FAILED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-both-g0-idtype0] FAILED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-both-g0-idtype1] FAILED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-right-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-right-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-none-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-none-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-both-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-both-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-right-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-right-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-none-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-none-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-both-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-both-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-right-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-right-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-none-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-none-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-both-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-both-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-right-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-right-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-none-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-none-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-both-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-both-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-right-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-right-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-none-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-none-g0-idtype1] FAILED [ 21%] 
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-both-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-both-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-right-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-right-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-none-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-none-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-both-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-both-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-right-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-right-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g0-idtype0] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g0-idtype1] FAILED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g1-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g1-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g0-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g0-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g1-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g1-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g0-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g0-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g1-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g1-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g0-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g0-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g1-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g1-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g0-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g0-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g1-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g1-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g0-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g0-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g1-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g1-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g0-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g0-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g1-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g1-idtype1] FAILED [ 22%] 
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g0-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g0-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g1-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g1-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g0-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g0-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g1-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g1-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g0-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g0-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g1-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g1-idtype1] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g0-idtype0] FAILED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g0-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g1-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g1-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g0-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g0-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g1-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g1-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g0-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g0-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g1-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g1-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g0-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g0-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g1-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g1-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g0-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g0-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g1-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g1-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g0-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g0-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g1-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g1-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g0-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g0-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g1-idtype0] FAILED [ 23%] 
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g1-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g0-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g0-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g1-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g1-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g0-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g0-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g1-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g1-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g0-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g0-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g1-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g1-idtype1] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g0-idtype0] FAILED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g0-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g1-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g1-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g0-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g0-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g1-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g1-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g0-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g0-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g1-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g1-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g0-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g0-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g1-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g1-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_tagconv[1] FAILED [ 24%] tests/pytorch/test_nn.py::test_tagconv[2] FAILED [ 24%] tests/pytorch/test_nn.py::test_set2set FAILED [ 24%] tests/pytorch/test_nn.py::test_glob_att_pool FAILED [ 24%] tests/pytorch/test_nn.py::test_simple_pool FAILED [ 24%] tests/pytorch/test_nn.py::test_set_trans PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn[1-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_rgcn[1-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_rgcn[8-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_rgcn[8-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_rgcn[32-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_rgcn[32-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_rgcn_default_nbasis[1-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_rgcn_default_nbasis[1-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_rgcn_default_nbasis[10-idtype0] FAILED [ 24%] 
tests/pytorch/test_nn.py::test_rgcn_default_nbasis[10-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_rgcn_default_nbasis[40-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_rgcn_default_nbasis[40-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g0-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g0-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g1-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g1-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g2-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g2-idtype1] FAILED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g3-idtype0] FAILED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g3-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g4-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g4-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g5-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g5-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g6-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g6-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g0-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g0-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g1-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g1-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g2-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g2-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g3-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g3-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g4-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g4-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g5-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g5-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g6-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g6-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g0-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g0-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g1-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g1-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g2-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g2-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g3-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g3-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g4-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g4-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g5-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g5-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g6-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g6-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g0-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g0-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g1-idtype0] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g1-idtype1] FAILED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g2-idtype0] FAILED [ 25%] 
tests/pytorch/test_nn.py::test_gat_conv[4-5-g2-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g3-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g3-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g4-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g4-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g5-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g5-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g6-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g6-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g0-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g0-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g1-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g1-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g0-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g0-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g1-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g1-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g0-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g0-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g1-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g1-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g0-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g0-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g1-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g1-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g0-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g0-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g1-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g1-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g2-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g2-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g3-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g3-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g4-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g4-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g5-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g5-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g6-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g6-idtype1] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g0-idtype0] FAILED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g0-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g1-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g1-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g2-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g2-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g3-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g3-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g4-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g4-idtype1] FAILED [ 27%] 
tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g5-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g5-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g6-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g6-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g0-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g0-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g1-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g1-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g2-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g2-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g3-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g3-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g4-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g4-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g5-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g5-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g6-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g6-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g0-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g0-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g1-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g1-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g2-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g2-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g3-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g3-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g4-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g4-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g5-idtype0] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g5-idtype1] FAILED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g6-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g6-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g0-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g0-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g1-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g1-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g0-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g0-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g1-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g1-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g0-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g0-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g1-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g1-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g0-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g0-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g1-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g1-idtype1] FAILED [ 28%] 
tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g0-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g0-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g1-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g1-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g2-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g2-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g3-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g3-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g4-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g4-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g5-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g5-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g0-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g0-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g1-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g1-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g2-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g2-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g3-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g3-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g4-idtype0] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g4-idtype1] FAILED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g5-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g5-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g0-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g0-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g1-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g1-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g2-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g2-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g3-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g3-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g4-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g4-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g5-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g5-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g0-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g0-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g1-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g1-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g2-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g2-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g3-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g3-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g4-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g4-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g5-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g5-idtype1] FAILED [ 29%] 
tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g0-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g0-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g1-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g1-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g2-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g2-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g3-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g3-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g4-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g4-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g5-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g5-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g0-idtype0] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g0-idtype1] FAILED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g1-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g1-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g2-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g2-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g3-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g3-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g4-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g4-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g5-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g5-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g0-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g0-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g1-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g1-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g2-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g2-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g3-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g3-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g4-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g4-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g5-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g5-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g0-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g0-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g1-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g1-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g2-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g2-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g3-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g3-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g4-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g4-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g5-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g5-idtype1] FAILED [ 30%] 
tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g0-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g0-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g1-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g1-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g0-idtype0] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g0-idtype1] FAILED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g1-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g1-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g0-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g0-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g1-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g1-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g0-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g0-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g1-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g1-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g0-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g0-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g1-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g1-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g0-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g0-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g1-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g1-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g0-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g0-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g1-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g1-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g0-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g0-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g1-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g1-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g0-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g0-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g1-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g1-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g2-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g2-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g3-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g3-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g4-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g4-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g5-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g5-idtype1] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g6-idtype0] FAILED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g6-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[mean-g7-idtype0] FAILED [ 32%] 
tests/pytorch/test_nn.py::test_sage_conv[mean-g7-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g0-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g0-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g1-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g1-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g2-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g2-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g3-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g3-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g4-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g4-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g5-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g5-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g6-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g6-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g7-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g7-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g0-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g0-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g1-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g1-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g2-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g2-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g3-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g3-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g4-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g4-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g5-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g5-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g6-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g6-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g7-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g7-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g0-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g0-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g1-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g1-idtype1] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g2-idtype0] FAILED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g2-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g3-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g3-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g4-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g4-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g5-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g5-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g6-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g6-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g7-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g7-idtype1] FAILED [ 33%] 
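Each bracketed suffix in the listing above is a pytest parametrization ID: pytest joins one ID per parametrize axis with '-', so test_sage_conv[lstm-g7-idtype1] is the single invocation of test_sage_conv combining the lstm aggregator, the eighth graph fixture, and the second node-ID dtype. A minimal sketch of how such IDs arise, with illustrative parameter names and values rather than the actual code in tests/pytorch/test_nn.py:

import pytest

# Stacked parametrize decorators multiply into one test per combination; the
# bottom decorator's ID comes first in the bracketed suffix. String and bool
# values become their own IDs, while other objects fall back to argname plus
# index, which is where 'g0'..'g7' and 'idtype0'/'idtype1' above come from.
@pytest.mark.parametrize('idtype', [object(), object()])   # -> idtype0, idtype1
@pytest.mark.parametrize('g', [object(), object()])        # -> g0, g1
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
def test_sage_conv(aggre_type, g, idtype):
    pass  # stand-in body; the real test exercises dgl.nn's SAGEConv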
tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g0-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g0-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g1-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g1-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g2-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g2-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g0-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g0-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g1-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g1-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g2-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g2-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g0-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g0-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g1-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g1-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g2-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g2-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g0-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g0-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g1-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g1-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g2-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g2-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g0-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g0-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g1-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g1-idtype1] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g2-idtype0] FAILED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g2-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g0-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g0-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g1-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g1-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g2-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g2-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g0-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g0-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g1-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g1-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g2-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g2-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g0-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g0-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g1-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g1-idtype1] FAILED [ 34%] 
tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g2-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g2-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv2[1-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv2[1-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv2[2-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sage_conv2[2-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g0-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g0-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g1-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g1-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g2-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g2-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g3-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g3-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g4-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g4-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g5-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g5-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g0-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g0-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g1-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g1-idtype1] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g2-idtype0] FAILED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g2-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g3-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g3-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g4-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g4-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g5-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g5-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g0-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g0-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g1-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g1-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g2-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g2-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g3-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g3-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g4-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g4-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g5-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g5-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g0-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g0-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g1-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g1-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g2-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g2-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g3-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g3-idtype1] FAILED [ 35%]
tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g4-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g4-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g5-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g5-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g0-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g0-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g1-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g1-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g2-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g2-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g3-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g3-idtype1] FAILED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g4-idtype0] FAILED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g4-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g5-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g5-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g0-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g0-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g1-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g1-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g2-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g2-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g3-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g3-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g4-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g4-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g5-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g5-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g0-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g0-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g1-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g1-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g2-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g2-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g3-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g3-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g4-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g4-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g5-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g5-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g0-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g0-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g1-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g1-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g2-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g2-idtype1] FAILED [ 36%]
tests/pytorch/test_nn.py::test_tagconv_e_weight[g3-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g3-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g4-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g4-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g5-idtype0] FAILED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g5-idtype1] FAILED [ 36%] tests/pytorch/test_nn.py::test_gin_conv[mean-g0-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g0-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g1-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g1-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g2-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g2-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g3-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g3-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g4-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g4-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g5-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g5-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g6-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g6-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g0-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g0-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g1-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g1-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g2-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g2-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g3-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g3-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g4-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g4-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g5-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g5-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g6-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g6-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g0-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g0-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g1-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g1-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g2-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g2-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g3-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g3-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g4-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g4-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g5-idtype0] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g5-idtype1] FAILED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g6-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv[sum-g6-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g0-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g0-idtype1] FAILED [ 38%] 
tests/pytorch/test_nn.py::test_gine_conv[g1-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g1-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g2-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g2-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g3-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g3-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g4-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g4-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g5-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g5-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g6-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g6-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g7-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g7-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g0-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g0-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g1-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g1-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[max-g0-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[max-g0-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[max-g1-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[max-g1-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g0-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g0-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g1-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g1-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g0-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g0-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g1-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g1-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g2-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g2-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g3-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g3-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g4-idtype0] FAILED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g4-idtype1] FAILED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g5-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv[g5-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv[g6-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv[g6-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv_bi[g0-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv_bi[g0-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv_bi[g1-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv_bi[g1-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g0-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g0-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g1-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g1-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g2-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g2-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g3-idtype0] FAILED [ 39%] 
tests/pytorch/test_nn.py::test_gated_graph_conv[g3-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g4-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g4-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g5-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g5-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g0-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g0-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g1-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g1-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g2-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g2-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g3-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g3-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g4-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g4-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g5-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g5-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g0-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g0-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g1-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g1-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g2-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g2-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g3-idtype0] FAILED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g3-idtype1] FAILED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g4-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_nn_conv[g4-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_nn_conv[g5-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_nn_conv[g5-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_nn_conv[g6-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_nn_conv[g6-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_nn_conv_bi[g0-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_nn_conv_bi[g0-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_nn_conv_bi[g1-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_nn_conv_bi[g1-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g0-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g0-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g1-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g1-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g2-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g2-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g3-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g3-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g4-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g4-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g5-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g5-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv_bi[g0-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv_bi[g0-idtype1] FAILED [ 40%]
tests/pytorch/test_nn.py::test_gmm_conv_bi[g1-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv_bi[g1-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-both-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-both-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-right-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-right-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-none-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-none-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-both-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-both-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-right-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-right-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-none-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-none-idtype1] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-both-idtype0] FAILED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-both-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-right-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-right-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-none-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-none-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-both-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-both-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-right-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-right-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-none-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-none-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-both-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-both-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-right-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-right-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-none-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-none-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-both-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-both-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-right-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-right-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-none-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-none-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-both-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-both-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-right-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-right-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-none-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-none-idtype1] FAILED [ 41%]
tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-both-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-both-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-right-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-right-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-none-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-none-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-both-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-both-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-right-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-right-idtype1] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-none-idtype0] FAILED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-none-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-both-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-both-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-right-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-right-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-none-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-none-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-both-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-both-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-right-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-right-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-none-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-none-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-both-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-both-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-right-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-right-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-none-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-none-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-both-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-both-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-right-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-right-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-none-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-none-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-both-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-both-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-right-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-right-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-none-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-none-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-both-idtype0] FAILED [ 42%] 
tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-both-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-right-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-right-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-none-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-none-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-both-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-both-idtype1] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-right-idtype0] FAILED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-right-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-none-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-none-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g0-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g0-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g1-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g1-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g2-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g2-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g3-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g3-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g4-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g4-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g5-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g5-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g6-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g6-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g7-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g7-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g8-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g8-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g9-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g9-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g0-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g0-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g1-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g1-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g2-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g2-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g3-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g3-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g4-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g4-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g5-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g5-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g6-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g6-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g7-idtype0] FAILED [ 43%] 
tests/pytorch/test_nn.py::test_dense_sage_conv[2-g7-idtype1] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g8-idtype0] FAILED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g8-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g9-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g9-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g0-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g0-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g1-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g1-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g2-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g2-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g3-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g3-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g4-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g4-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g5-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g5-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g6-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g6-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g0-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g0-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g1-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g1-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g2-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g2-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g3-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g3-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g4-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g4-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g5-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g5-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g6-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g6-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[1-g0-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[1-g0-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[1-g1-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[1-g1-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[2-g0-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[2-g0-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[2-g1-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[2-g1-idtype1] FAILED [ 44%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g0-idtype0] FAILED [ 44%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g0-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g1-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g1-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g2-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g2-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g3-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g3-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g4-idtype0] FAILED [ 45%] 
tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g4-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g5-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g5-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g6-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g6-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g0-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g0-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g1-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g1-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g2-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g2-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g3-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g3-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g4-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g4-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g5-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g5-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g6-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g6-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g0-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g0-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g1-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g1-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g2-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g2-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g3-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g3-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g4-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g4-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g5-idtype0] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g5-idtype1] FAILED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g6-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g6-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g0-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g0-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g1-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g1-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g2-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g2-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g3-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g3-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g4-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g4-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g5-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g5-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g6-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g6-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g0-idtype0] FAILED [ 46%] 
tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g0-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g1-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g1-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g0-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g0-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g1-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g1-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g0-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g0-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g1-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g1-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g0-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g0-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g1-idtype0] FAILED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g1-idtype1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dense_cheb_conv[1] FAILED [ 46%] tests/pytorch/test_nn.py::test_dense_cheb_conv[2] FAILED [ 46%] tests/pytorch/test_nn.py::test_sequential FAILED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g0-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g0-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g1-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g1-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g2-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g2-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g3-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g3-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g4-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g4-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g5-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g5-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g0-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g0-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g1-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g1-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g2-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g2-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g3-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g3-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g4-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g4-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g5-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g5-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g6-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g6-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g7-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g7-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g0-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g0-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g1-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g1-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g2-idtype0] FAILED [ 47%]
tests/pytorch/test_nn.py::test_cf_conv[3-g2-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g3-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g3-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g4-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g4-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g5-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g5-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g6-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g6-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g7-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g7-idtype1] FAILED [ 47%] tests/pytorch/test_nn.py::test_hetero_conv[False-sum-idtype0] FAILED [ 47%] tests/pytorch/test_nn.py::test_hetero_conv[False-sum-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-max-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-max-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-min-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-min-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-mean-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-mean-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-stack-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-stack-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-myagg-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-myagg-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-sum-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-sum-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-max-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-max-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-min-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-min-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-mean-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-mean-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-stack-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-stack-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-myagg-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-myagg-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_hetero_linear[1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_linear[2] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_linear[100] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_embedding[1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_embedding[2] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_embedding[100] PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g0-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g0-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g1-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g1-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g2-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g2-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g3-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g3-idtype1] FAILED [ 48%]
tests/pytorch/test_nn.py::test_gnnexplainer[1-g4-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g4-idtype1] FAILED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g5-idtype0] FAILED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g5-idtype1] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g0-idtype0] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g0-idtype1] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g1-idtype0] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g1-idtype1] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g2-idtype0] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g2-idtype1] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g3-idtype0] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g3-idtype1] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g4-idtype0] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g4-idtype1] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g5-idtype0] FAILED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g5-idtype1] FAILED [ 49%] tests/pytorch/test_nn.py::test_jumping_knowledge PASSED [ 49%] tests/pytorch/test_nn.py::test_edge_predictor[dot] PASSED [ 49%] tests/pytorch/test_nn.py::test_edge_predictor[cos] PASSED [ 49%] tests/pytorch/test_nn.py::test_edge_predictor[ele] PASSED [ 49%] tests/pytorch/test_nn.py::test_edge_predictor[cat] PASSED [ 49%] tests/pytorch/test_nn.py::test_ke_score_funcs PASSED [ 49%] tests/pytorch/test_nn.py::test_twirls FAILED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[None-None-4] PASSED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[None-None-32] PASSED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[basis-4-4] PASSED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[basis-4-32] PASSED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[bdd-4-4] PASSED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[bdd-4-32] PASSED [ 49%] tests/pytorch/test_nn.py::test_hgt[1-4-idtype0] FAILED [ 49%] tests/pytorch/test_nn.py::test_hgt[1-4-idtype1] FAILED [ 49%] tests/pytorch/test_nn.py::test_radius_graph[True-True] PASSED [ 49%] tests/pytorch/test_nn.py::test_radius_graph[True-False] PASSED [ 49%] tests/pytorch/test_nn.py::test_radius_graph[False-True] PASSED [ 49%] tests/pytorch/test_nn.py::test_radius_graph[False-False] PASSED [ 49%] tests/pytorch/test_nn.py::test_group_rev_res[idtype0] FAILED [ 49%] tests/pytorch/test_nn.py::test_group_rev_res[idtype1] FAILED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-16-16-16] FAILED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-16-16-32] FAILED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-16-32-16] FAILED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-16-32-32] FAILED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-32-16-16] FAILED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-32-16-32] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[16-32-32-16] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[16-32-32-32] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-16-16-16] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-16-16-32] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-16-32-16] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-16-32-32] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-32-16-16] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-32-16-32] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-32-32-16] FAILED [ 50%]
tests/pytorch/test_nn.py::test_egnn_conv[10-32-32-32] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-16-16-16] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-16-16-32] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-16-32-16] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-16-32-32] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-32-16-16] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-32-16-32] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-32-32-16] FAILED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-32-32-32] FAILED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 51%]
tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 52%] 
tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 52%] 
tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 53%] 
tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 54%] 
tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 55%] 
tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 56%] 
tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 57%] 
tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 58%] 
tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 59%] 
tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 59%] 
tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 60%] 
tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 61%] 
tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 62%] 
tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 63%] 
tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 64%] 
tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 65%] 
tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 66%] 
tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 66%] 
tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 67%] 
tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 68%] 
tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 69%] 
tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.0-3] FAILED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.0-5] FAILED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.5-3] FAILED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.5-5] FAILED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-1.0-3] FAILED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-1.0-5] FAILED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.0-3] FAILED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.0-5] FAILED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.5-3] FAILED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.5-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-1.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-1.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.5-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.5-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-1.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-1.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.5-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.5-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-1.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-1.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.5-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.5-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-1.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-1.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.5-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.5-5] FAILED [ 70%] 
tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-1.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-1.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.5-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.5-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-1.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-1.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.5-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.5-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-1.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-1.0-5] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.0-3] FAILED [ 70%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.5-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.5-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-1.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-1.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.5-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.5-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-1.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-1.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.5-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.5-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-1.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-1.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.5-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.5-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-1.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-1.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.5-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.5-5] FAILED [ 71%] 
tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-1.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-1.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.5-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.5-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-1.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-1.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.0-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.5-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.5-5] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-1.0-3] FAILED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-1.0-5] FAILED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.0-3] FAILED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.0-5] FAILED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.5-3] FAILED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.5-5] FAILED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-1.0-3] FAILED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-1.0-5] FAILED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 72%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 73%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 74%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 75%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 76%] 
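Each bracketed suffix in the test_dgn_conv IDs above is a pytest parametrization ID: one component per parametrized argument, joined with "-". Scalar values appear verbatim (True, 16, 0.0), while non-scalar values fall back to the argument name plus an index, which is where "scalers0" and "aggregators1" come from. A sketch of the mechanism with hypothetical argument names (the real signature in tests/pytorch/test_nn.py may differ):

    # Sketch of how pytest composes IDs such as
    # "test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators0-16-16]".
    # The argument names and value sets below are placeholders.
    import pytest

    @pytest.mark.parametrize(
        "in_size,out_size", [(16, 16), (16, 32), (32, 16), (32, 32)]
    )
    def test_shapes(in_size, out_size):
        # pytest joins the chosen values with "-", so this function yields
        # test_shapes[16-16], test_shapes[16-32], ..., test_shapes[32-32].
        assert in_size in (16, 32) and out_size in (16, 32)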
tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-32-16] Running on dglci-manual-gpu-worker in /root/jenkins/workspace/dgl_PR-4648 [Pipeline] { [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags Cloning repository https://github.com/dmlc/dgl.git > git init /root/jenkins/workspace/dgl_PR-4648 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 Cleaning workspace Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out 
Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 Commit message: "fix for pytorch < 1.12" Cleaning workspace PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-32-32] [Pipeline] withEnv [Pipeline] { [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 + docker pull dgllib/dgl-ci-gpu:cu101_v220816 cu101_v220816: Pulling from dgllib/dgl-ci-gpu Digest: sha256:ca40fc52876a2563a4e904d0c271d658c1acc8e6a4f8611b578bb49f8c7fd925 Status: Image is up to date for dgllib/dgl-ci-gpu:cu101_v220816 docker.io/dgllib/dgl-ci-gpu:cu101_v220816 [Pipeline] } PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-16-16] [Pipeline] // withEnv [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh + docker inspect -f . dgllib/dgl-ci-gpu:cu101_v220816 . 
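The lone "." printed after `docker inspect -f . dgllib/dgl-ci-gpu:cu101_v220816` is how the pipeline verifies the image is available locally: the Go template "." renders one dot per matched object, and a non-zero exit status means the image is absent and must be pulled. The same check, sketched in Python for illustration:

    # Sketch of the availability check behind "docker inspect -f . <image>":
    # exit status 0 (and one "." on stdout) means the image exists locally.
    import subprocess

    def image_available(image: str) -> bool:
        result = subprocess.run(
            ["docker", "inspect", "-f", ".", image],
            capture_output=True, text=True,
        )
        return result.returncode == 0

    if __name__ == "__main__":
        print(image_available("dgllib/dgl-ci-gpu:cu101_v220816"))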
[Pipeline] } [Pipeline] // withEnv [Pipeline] withDockerContainer dglci-manual-gpu-worker does not seem to be running inside a container $ docker run -t -d -u 0:0 --runtime nvidia -w /root/jenkins/workspace/dgl_PR-4648 -v /root/jenkins/workspace/dgl_PR-4648:/root/jenkins/workspace/dgl_PR-4648:rw,z -v /root/jenkins/workspace/dgl_PR-4648@tmp:/root/jenkins/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-gpu:cu101_v220816 cat $ docker top b2202f61772e10290ef30a280d6bba21fca8f51e5ea4851aba487f9ad5c84e0e -eo pid,comm [Pipeline] { [Pipeline] sh + rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@37856046; decorates RemoteLauncher[hudson.remoting.Channel@41a64267:dglci-manual-gpu-worker] will be ignored (a typical symptom is the Git executable not being run inside a designated container) Fetching changes from the remote Git repository Cleaning workspace Fetching without tags PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-16-32] Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace > git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648/.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # 
timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 [Pipeline] sh + git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dlpack'... PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-32-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/googletest'... PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-32-32] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/libxsmm'... 
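The registrations above come straight from .gitmodules; `git submodule update --recursive --init` then clones every registered path at its pinned commit, recursing into nested submodules (tensorpipe, thrust and tvm each carry their own). The same inventory can be listed without cloning anything by querying .gitmodules directly — a sketch, assuming it runs from the repository root:

    # Sketch: enumerate the submodules that "git submodule update
    # --recursive --init" will populate, straight from .gitmodules.
    import subprocess

    out = subprocess.run(
        ["git", "config", "-f", ".gitmodules",
         "--get-regexp", r"submodule\..*\.url"],
        capture_output=True, text=True, check=True,
    ).stdout

    for line in out.splitlines():
        key, url = line.split(" ", 1)
        # key looks like: submodule.third_party/tvm.url
        name = key[len("submodule."):-len(".url")]
        print(f"{name} -> {url}")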
PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators0-32-32] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nccl'... PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators1-16-32] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/phmap'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'... PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators1-32-32] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust'... PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators0-16-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm'... 
PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators2-32-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/xbyak'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'... PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators2-32-32] Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators0-16-16] Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators0-16-32] Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'... PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators0-32-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'... PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators0-32-32] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'... 
PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators1-16-32] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'... PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators1-32-16] Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators1-32-32] Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'... Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'... PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators0-16-16] Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators0-32-16] Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'... PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators0-32-32] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'... 
PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators1-16-32] Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] unstash PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators0-32-32] [Pipeline] echo Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-gpu-linux [Pipeline] sh PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-16-16] + bash tests/scripts/task_cpp_unit_test.sh /root/jenkins/workspace/dgl_PR-4648 ~/jenkins/workspace/dgl_PR-4648/build ~/jenkins/workspace/dgl_PR-4648 total 284M -rwxr-xr-x 1 root root 282M Sep 27 04:50 libdgl.so -rwxr-xr-x 1 root root 2.5M Sep 27 04:50 runUnitTests drwxr-xr-x 3 root root 4.0K Sep 27 05:17 tensoradapter Running main() from /root/jenkins/workspace/dgl_PR-4648@2/third_party/googletest/googletest/src/gtest_main.cc [==========] Running 101 tests from 23 test suites. [----------] Global test environment set-up. [----------] 1 test from GraphTest [ RUN ] GraphTest.TestNumVertices [ OK ] GraphTest.TestNumVertices (0 ms) [----------] 1 test from GraphTest (0 ms total) [----------] 3 tests from MessageQueueTest [ RUN ] MessageQueueTest.AddRemove [05:17:45] /root/jenkins/workspace/dgl_PR-4648@2/src/rpc/network/msg_queue.cc:27: Message is larger than the queue. 
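From this point the GoogleTest output of build/runUnitTests ("[ RUN ] ... [ OK ] ...") interleaves with the GPU pytest stream, since both stages run in parallel. When triaging a single C++ suite in isolation, GoogleTest's standard --gtest_filter flag narrows the run — a sketch, assuming the binary exists under build/ as shown in the listing above:

    # Sketch: re-run one GoogleTest suite from the C++ unit-test binary.
    # --gtest_filter is a standard GoogleTest flag.
    import subprocess

    subprocess.run(
        ["./build/runUnitTests", "--gtest_filter=MessageQueueTest.*"],
        check=True,
    )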
[ OK ] MessageQueueTest.AddRemove (1 ms) [ RUN ] MessageQueueTest.EmptyAndNoMoreAdd [ OK ] MessageQueueTest.EmptyAndNoMoreAdd (0 ms) [ RUN ] MessageQueueTest.MultiThread [ OK ] MessageQueueTest.MultiThread (6 ms) [----------] 3 tests from MessageQueueTest (7 ms total) [----------] 3 tests from SocketCommunicatorTest [ RUN ] SocketCommunicatorTest.SendAndRecv PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 79%] 
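The msg_queue.cc message "Message is larger than the queue" (emitted during MessageQueueTest.AddRemove above) suggests a capacity-bounded queue that rejects any single message exceeding its total capacity instead of blocking forever. A toy model of that contract — hypothetical Python, not DGL's actual C++ implementation in src/rpc/network/msg_queue.cc:

    # Toy model of a capacity-bounded message queue that refuses messages
    # larger than the queue itself (hypothetical sketch).
    from collections import deque

    class BoundedMessageQueue:
        def __init__(self, capacity: int):
            self.capacity = capacity
            self.used = 0
            self.items = deque()

        def add(self, msg: bytes) -> bool:
            if len(msg) > self.capacity:
                return False          # can never fit: reject outright
            if self.used + len(msg) > self.capacity:
                return False          # queue currently full
            self.items.append(msg)
            self.used += len(msg)
            return True

        def remove(self) -> bytes:
            msg = self.items.popleft()
            self.used -= len(msg)
            return msg

    q = BoundedMessageQueue(capacity=8)
    assert not q.add(b"0123456789")   # the "larger than the queue" case
    assert q.add(b"abc") and q.remove() == b"abc"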
tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators0-32-32] [ OK ] SocketCommunicatorTest.SendAndRecv (7002 ms) [ RUN ] SocketCommunicatorTest.SendAndRecvTimeout PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators1-32-16] [ OK ] SocketCommunicatorTest.SendAndRecvTimeout (5000 ms) [ RUN ] SocketCommunicatorTest.TCPSocketBind [ OK ] SocketCommunicatorTest.TCPSocketBind (0 ms) [----------] 3 tests from SocketCommunicatorTest (12002 ms total) [----------] 3 tests from SplitStringTest [ RUN ] SplitStringTest.SplitStringUsingCompoundDelim [ OK ] SplitStringTest.SplitStringUsingCompoundDelim (0 ms) [ RUN ] SplitStringTest.testSplitStringUsingSingleDelim [ OK ] SplitStringTest.testSplitStringUsingSingleDelim (0 ms) [ RUN ] SplitStringTest.testSplitingNoDelimString [ OK ] SplitStringTest.testSplitingNoDelimString (0 ms) [----------] 3 tests from SplitStringTest (0 ms total) [----------] 1 test from StringPrintf [ RUN ] StringPrintf.normal [ OK ] StringPrintf.normal (0 ms) [----------] 1 test from StringPrintf (0 ms total) [----------] 13 tests from ArrayTest [ RUN ] ArrayTest.TestCreate [ OK ] ArrayTest.TestCreate (0 ms) [ RUN ] ArrayTest.TestRange PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators1-32-32] [ OK ] ArrayTest.TestRange (701 ms) [ RUN ] ArrayTest.TestFull [ OK ] ArrayTest.TestFull (0 ms) [ RUN ] ArrayTest.TestClone [ OK ] ArrayTest.TestClone (0 ms) [ RUN ] ArrayTest.TestAsNumBits [ OK ] ArrayTest.TestAsNumBits (0 ms) [ RUN ] ArrayTest.Arith [ OK ] ArrayTest.Arith (1 ms) [ RUN ] ArrayTest.HStack [ OK ] ArrayTest.HStack (1 ms) [ RUN ] ArrayTest.TestIndexSelect [ OK ] ArrayTest.TestIndexSelect (0 ms) [ RUN ] ArrayTest.TestRelabel_ [ OK ] ArrayTest.TestRelabel_ (1 ms) [ RUN ] ArrayTest.CumSum [ OK ] ArrayTest.CumSum 
(0 ms) [ RUN ] ArrayTest.Scatter_ [ OK ] ArrayTest.Scatter_ (0 ms) [ RUN ] ArrayTest.NonZero [ OK ] ArrayTest.NonZero (1 ms) [ RUN ] ArrayTest.Sort [ OK ] ArrayTest.Sort (0 ms) [----------] 13 tests from ArrayTest (706 ms total) [----------] 2 tests from MatrixTest [ RUN ] MatrixTest.TestToSimpleCsr [ OK ] MatrixTest.TestToSimpleCsr (1 ms) [ RUN ] MatrixTest.TestToSimpleCoo [ OK ] MatrixTest.TestToSimpleCoo (0 ms) [----------] 2 tests from MatrixTest (1 ms total) [----------] 2 tests from DisjointUnionTest [ RUN ] DisjointUnionTest.TestDisjointUnionPartitionCoo [ OK ] DisjointUnionTest.TestDisjointUnionPartitionCoo (0 ms) [ RUN ] DisjointUnionTest.TestDisjointUnionPartitionCsr [ OK ] DisjointUnionTest.TestDisjointUnionPartitionCsr (0 ms) [----------] 2 tests from DisjointUnionTest (0 ms total) [----------] 2 tests from SliceContiguousChunk [ RUN ] SliceContiguousChunk.TestSliceContiguousChunkCoo [ OK ] SliceContiguousChunk.TestSliceContiguousChunkCoo (0 ms) [ RUN ] SliceContiguousChunk.TestSliceContiguousChunkCsr [ OK ] SliceContiguousChunk.TestSliceContiguousChunkCsr (1 ms) [----------] 2 tests from SliceContiguousChunk (1 ms total) [----------] 2 tests from MatrixUnionTest [ RUN ] MatrixUnionTest.TestMatrixUnionCsr [ OK ] MatrixUnionTest.TestMatrixUnionCsr (0 ms) [ RUN ] MatrixUnionTest.TestMatrixUnionCoo [ OK ] MatrixUnionTest.TestMatrixUnionCoo (0 ms) [----------] 2 tests from MatrixUnionTest (0 ms total) [----------] 1 test from LineGraphTest [ RUN ] LineGraphTest.LineGraphCOO [ OK ] LineGraphTest.LineGraphCOO (0 ms) [----------] 1 test from LineGraphTest (0 ms total) [----------] 3 tests from CsrmmTest [ RUN ] CsrmmTest.TestCsrmm PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators2-16-16] [ OK ] CsrmmTest.TestCsrmm (337 ms) [ RUN ] CsrmmTest.TestCsrsum [ OK ] CsrmmTest.TestCsrsum (5 ms) [ RUN ] CsrmmTest.TestCsrmask [ OK ] CsrmmTest.TestCsrmask (0 ms) [----------] 3 tests from CsrmmTest (342 ms total) [----------] 2 tests from PartitionTest [ RUN ] PartitionTest.TestRemainderPartition [ OK ] PartitionTest.TestRemainderPartition (19 ms) [ RUN ] PartitionTest.TestRangePartition [ OK ] PartitionTest.TestRangePartition (23 ms) [----------] 2 tests from PartitionTest (42 ms total) [----------] 11 tests from RowwiseTest [ RUN ] RowwiseTest.TestCSRSampling [ OK ] RowwiseTest.TestCSRSampling (11 ms) [ RUN ] RowwiseTest.TestCSRSamplingUniform [ OK ] RowwiseTest.TestCSRSamplingUniform (2 ms) [ RUN ] RowwiseTest.TestCSRPerEtypeSampling [ OK ] RowwiseTest.TestCSRPerEtypeSampling (10 ms) [ RUN ] RowwiseTest.TestCSRPerEtypeSamplingUniform [ OK ] RowwiseTest.TestCSRPerEtypeSamplingUniform (6 ms) [ RUN ] RowwiseTest.TestCOOSampling PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators2-16-32] [ OK ] RowwiseTest.TestCOOSampling (201 ms) [ RUN ] RowwiseTest.TestCOOSamplingUniform [ OK ] RowwiseTest.TestCOOSamplingUniform (33 ms) [ RUN ] RowwiseTest.TestCOOerEtypeSampling PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 80%] 
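MatrixTest, CsrmmTest and (further down) SpmatTest exercise DGL's C++ kernels for the COO and CSR sparse layouts. For reference, the COO-to-CSR conversion those tests cover, illustrated with scipy.sparse (scipy is used here purely as a familiar reference implementation and is not part of DGL's own kernels):

    # COO -> CSR in miniature: CSR replaces per-entry row indices with a
    # compressed row-pointer array (indptr).
    import numpy as np
    from scipy.sparse import coo_matrix

    row = np.array([0, 0, 1, 2])
    col = np.array([0, 2, 2, 1])
    val = np.array([1.0, 2.0, 3.0, 4.0])

    csr = coo_matrix((val, (row, col)), shape=(3, 3)).tocsr()
    print(csr.indptr)    # [0 2 3 4] -- row i spans indices[indptr[i]:indptr[i+1]]
    print(csr.indices)   # [0 2 2 1]
    print(csr.data)      # [1. 2. 3. 4.]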
tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators1-16-16] [ OK ] RowwiseTest.TestCOOerEtypeSampling (3897 ms) [ RUN ] RowwiseTest.TestCOOPerEtypeSamplingUniform PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 80%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators2-32-32] [ OK ] RowwiseTest.TestCOOPerEtypeSamplingUniform (2637 ms) [ RUN ] RowwiseTest.TestCSRTopk [ OK ] RowwiseTest.TestCSRTopk (6 ms) [ RUN ] RowwiseTest.TestCOOTopk [ OK ] RowwiseTest.TestCOOTopk (4 ms) [ RUN ] RowwiseTest.TestCSRSamplingBiased [ OK ] RowwiseTest.TestCSRSamplingBiased (2 ms) [----------] 11 tests from RowwiseTest (6809 ms total) [----------] 3 tests from SampleUtilsTest [ RUN ] SampleUtilsTest.TestWithReplacement PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 81%] 
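SampleUtilsTest (started above, finishing below) checks DGL's samplers in both with- and without-replacement modes; the distinction in miniature, using only the Python standard library:

    # With replacement an index may repeat; without replacement it cannot.
    import random

    population = range(10)
    with_replacement = [random.choice(population) for _ in range(5)]
    without_replacement = random.sample(population, k=5)

    assert len(set(without_replacement)) == 5   # guaranteed all distinct
    # with_replacement may legitimately contain duplicates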
tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators2-32-16] [ OK ] SampleUtilsTest.TestWithReplacement (3302 ms) [ RUN ] SampleUtilsTest.TestWithoutReplacementOrder [ OK ] SampleUtilsTest.TestWithoutReplacementOrder (0 ms) [ RUN ] SampleUtilsTest.TestWithoutReplacementUnique PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators0-32-32] [ OK ] SampleUtilsTest.TestWithoutReplacementUnique (4103 ms) [----------] 3 tests from 
SampleUtilsTest (7405 ms total) [----------] 3 tests from RandomTest [ RUN ] RandomTest.TestChoice [ OK ] RandomTest.TestChoice (1 ms) [ RUN ] RandomTest.TestUniformChoice [ OK ] RandomTest.TestUniformChoice (0 ms) [ RUN ] RandomTest.TestBiasedChoice [ OK ] RandomTest.TestBiasedChoice (16 ms) [----------] 3 tests from RandomTest (17 ms total) [----------] 4 tests from Serialize [ RUN ] Serialize.UnitGraph_COO [ OK ] Serialize.UnitGraph_COO (0 ms) [ RUN ] Serialize.UnitGraph_CSR [ OK ] Serialize.UnitGraph_CSR (0 ms) [ RUN ] Serialize.ImmutableGraph [ OK ] Serialize.ImmutableGraph (0 ms) [ RUN ] Serialize.HeteroGraph [ OK ] Serialize.HeteroGraph (0 ms) [----------] 4 tests from Serialize (0 ms total) [----------] 3 tests from SmartPtrTest/0, where TypeParam = std::shared_ptr [ RUN ] SmartPtrTest/0.Obj_Test [ OK ] SmartPtrTest/0.Obj_Test (0 ms) [ RUN ] SmartPtrTest/0.Vector_Test1 [ OK ] SmartPtrTest/0.Vector_Test1 (0 ms) [ RUN ] SmartPtrTest/0.Vector_Test2 [ OK ] SmartPtrTest/0.Vector_Test2 (0 ms) [----------] 3 tests from SmartPtrTest/0 (0 ms total) [----------] 3 tests from SmartPtrTest/1, where TypeParam = std::unique_ptr > [ RUN ] SmartPtrTest/1.Obj_Test [ OK ] SmartPtrTest/1.Obj_Test (0 ms) [ RUN ] SmartPtrTest/1.Vector_Test1 [ OK ] SmartPtrTest/1.Vector_Test1 (0 ms) [ RUN ] SmartPtrTest/1.Vector_Test2 [ OK ] SmartPtrTest/1.Vector_Test2 (0 ms) [----------] 3 tests from SmartPtrTest/1 (0 ms total) [----------] 19 tests from SpmatTest [ RUN ] SpmatTest.COOToCSR [ OK ] SpmatTest.COOToCSR (4 ms) [ RUN ] SpmatTest.TestCOOHasDuplicate [ OK ] SpmatTest.TestCOOHasDuplicate (0 ms) [ RUN ] SpmatTest.COOSort [ OK ] SpmatTest.COOSort (2 ms) [ RUN ] SpmatTest.TestCOOReorder [ OK ] SpmatTest.TestCOOReorder (0 ms) [ RUN ] SpmatTest.COOGetData [ OK ] SpmatTest.COOGetData (10 ms) [ RUN ] SpmatTest.COOGetDataAndIndices [ OK ] SpmatTest.COOGetDataAndIndices (0 ms) [ RUN ] SpmatTest.TestCSRIsNonZero [ OK ] SpmatTest.TestCSRIsNonZero (3 ms) [ RUN ] SpmatTest.TestCSRGetRowNNZ [ OK ] SpmatTest.TestCSRGetRowNNZ (0 ms) [ RUN ] SpmatTest.TestCSRGetRowColumnIndices [ OK ] SpmatTest.TestCSRGetRowColumnIndices (1 ms) [ RUN ] SpmatTest.TestCSRGetRowData [ OK ] SpmatTest.TestCSRGetRowData (0 ms) [ RUN ] SpmatTest.CSRGetData [ OK ] SpmatTest.CSRGetData (2 ms) [ RUN ] SpmatTest.CSRGetDataAndIndices [ OK ] SpmatTest.CSRGetDataAndIndices (0 ms) [ RUN ] SpmatTest.CSRTranspose [ OK ] SpmatTest.CSRTranspose (1 ms) [ RUN ] SpmatTest.CSRToCOO [ OK ] SpmatTest.CSRToCOO (1 ms) [ RUN ] SpmatTest.TestCSRSliceRows [ OK ] SpmatTest.TestCSRSliceRows (56 ms) [ RUN ] SpmatTest.CSRSliceMatrix [ OK ] SpmatTest.CSRSliceMatrix (6 ms) [ RUN ] SpmatTest.CSRHasDuplicate [ OK ] SpmatTest.CSRHasDuplicate (1 ms) [ RUN ] SpmatTest.CSRSort [ OK ] SpmatTest.CSRSort (1 ms) [ RUN ] SpmatTest.TestCSRReorder [ OK ] SpmatTest.TestCSRReorder (0 ms) [----------] 19 tests from SpmatTest (88 ms total) [----------] 6 tests from SpmmTest [ RUN ] SpmmTest.TestSpmmCopyLhs [ OK ] SpmmTest.TestSpmmCopyLhs (1 ms) [ RUN ] SpmmTest.TestSpmmCopyRhs [ OK ] SpmmTest.TestSpmmCopyRhs (0 ms) [ RUN ] SpmmTest.TestSpmmAdd [ OK ] SpmmTest.TestSpmmAdd (0 ms) [ RUN ] SpmmTest.TestSpmmSub [ OK ] SpmmTest.TestSpmmSub (1 ms) [ RUN ] SpmmTest.TestSpmmMul [ OK ] SpmmTest.TestSpmmMul (0 ms) [ RUN ] SpmmTest.TestSpmmDiv [ OK ] SpmmTest.TestSpmmDiv (0 ms) [----------] 6 tests from SpmmTest (2 ms total) [----------] 7 tests from UniGraphTest [ RUN ] UniGraphTest.TestUnitGraph_CopyTo [ OK ] UniGraphTest.TestUnitGraph_CopyTo (1 ms) [ RUN ] UniGraphTest.TestUnitGraph_InOutDegrees PASSED [ 
PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators1-16-16]
[       OK ] UniGraphTest.TestUnitGraph_InOutDegrees (14 ms)
[ RUN      ] UniGraphTest.TestUnitGraph_Create
[       OK ] UniGraphTest.TestUnitGraph_Create (0 ms)
[ RUN      ] UniGraphTest.TestUnitGraph_GetInCSR
[       OK ] UniGraphTest.TestUnitGraph_GetInCSR (0 ms)
[ RUN      ] UniGraphTest.TestUnitGraph_GetOutCSR
[       OK ] UniGraphTest.TestUnitGraph_GetOutCSR (1 ms)
[ RUN      ] UniGraphTest.TestUnitGraph_GetCOO
[       OK ] UniGraphTest.TestUnitGraph_GetCOO (0 ms)
[ RUN      ] UniGraphTest.TestUnitGraph_Reserve
[       OK ] UniGraphTest.TestUnitGraph_Reserve (0 ms)
[----------] 7 tests from UniGraphTest (16 ms total)
[----------] 4 tests from ZeroCopySerialize
[ RUN      ] ZeroCopySerialize.NDArray
[       OK ] ZeroCopySerialize.NDArray (0 ms)
[ RUN      ] ZeroCopySerialize.ZeroShapeNDArray
[       OK ] ZeroCopySerialize.ZeroShapeNDArray (0 ms)
[ RUN      ] ZeroCopySerialize.SharedMem
[       OK ] ZeroCopySerialize.SharedMem (0 ms)
[ RUN      ] ZeroCopySerialize.HeteroGraph
[       OK ] ZeroCopySerialize.HeteroGraph (0 ms)
[----------] 4 tests from ZeroCopySerialize (0 ms total)
[----------] Global test environment tear-down
[==========] 101 tests from 23 test suites ran. (27438 ms total)
[  PASSED  ] 101 tests.
~/jenkins/workspace/dgl_PR-4648
Post stage
[Pipeline] cleanWs
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is disabled by the job configuration...
[WS-CLEANUP] done
[Pipeline] }
$ docker stop --time=1 b2202f61772e10290ef30a280d6bba21fca8f51e5ea4851aba487f9ad5c84e0e
PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators0-32-16]
$ docker rm -f b2202f61772e10290ef30a280d6bba21fca8f51e5ea4851aba487f9ad5c84e0e
PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 82%]
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators2-16-32]
[Pipeline] // withDockerContainer
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
Running on dglci-manual-gpu-worker in /root/jenkins/workspace/dgl_PR-4648
[Pipeline] // node
[Pipeline] {
[Pipeline] }
[Pipeline] // stage
[Pipeline] } [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators0-16-32] Cloning repository https://github.com/dmlc/dgl.git > git init /root/jenkins/workspace/dgl_PR-4648 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # 
timeout=10 PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators2-16-16] Cleaning workspace Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. 
Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators2-16-32] Commit message: "fix for pytorch < 1.12" Cleaning workspace PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators1-16-16] [Pipeline] withEnv [Pipeline] { [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators1-32-16] + docker pull dgllib/dgl-ci-gpu:cu101_v220816 PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators0-16-32] cu101_v220816: Pulling from dgllib/dgl-ci-gpu Digest: sha256:ca40fc52876a2563a4e904d0c271d658c1acc8e6a4f8611b578bb49f8c7fd925 Status: Image is up to date for dgllib/dgl-ci-gpu:cu101_v220816 docker.io/dgllib/dgl-ci-gpu:cu101_v220816 [Pipeline] } [Pipeline] // withEnv [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh PASSED [ 84%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators0-32-32] + docker inspect -f . dgllib/dgl-ci-gpu:cu101_v220816 . PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators1-16-32] [Pipeline] } [Pipeline] // withEnv [Pipeline] withDockerContainer dglci-manual-gpu-worker does not seem to be running inside a container $ docker run -t -d -u 0:0 --runtime nvidia -w /root/jenkins/workspace/dgl_PR-4648 -v /root/jenkins/workspace/dgl_PR-4648:/root/jenkins/workspace/dgl_PR-4648:rw,z -v /root/jenkins/workspace/dgl_PR-4648@tmp:/root/jenkins/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-gpu:cu101_v220816 cat PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators2-16-32] $ docker top accf251fd0b56b1d65207f2fd238e906ccb6fddb8b2532d85c1dfc3329fb23a3 -eo pid,comm [Pipeline] { [Pipeline] stage PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators2-32-32] [Pipeline] { (Tensorflow GPU Unit test) [Pipeline] sh PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators0-16-32] + rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@1ed0eff6; decorates RemoteLauncher[hudson.remoting.Channel@41a64267:dglci-manual-gpu-worker] will be ignored (a typical symptom is the Git executable not being run inside a designated container) Fetching changes from the remote Git repository Cleaning workspace Fetching without tags PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators1-16-32] Merging 
remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators1-32-32] Commit message: "fix for pytorch < 1.12" Cleaning workspace PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators2-16-32] > git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648/.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators2-32-32] [Pipeline] sh PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators0-16-32] + git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 
'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS'... PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators2-32-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dlpack'... PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators0-16-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'... PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators2-16-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/googletest'... 
PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators2-32-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/libxsmm'... PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 
86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 87%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 88%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 89%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 90%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators2-32-32] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nanoflann'... PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators1-16-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nccl'... PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators2-16-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/phmap'... 
PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators0-16-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'... PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators2-16-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust'... PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators2-16-32] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm'... PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam[1] SKIPPED (Do not sup...) [ 91%] tests/pytorch/test_optim.py::test_sparse_adam[4] SKIPPED (Do not sup...) [ 91%] tests/pytorch/test_optim.py::test_sparse_adam[101] SKIPPED (Do not s...) [ 91%] tests/pytorch/test_optim.py::test_sparse_adam[1024] SKIPPED (Do not ...) [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[1-False] SKIPPED (...) [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[1-True] SKIPPED (D...) [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[1-None] SKIPPED (D...) 
[ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_uva[4-False] SKIPPED (...) [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_uva[4-True] SKIPPED (D...) [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_uva[4-None] SKIPPED (D...) [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_uva[101-False] SKIPPED [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_uva[101-True] SKIPPED [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_uva[101-None] SKIPPED [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_uva[1024-False] SKIPPED [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_uva[1024-True] SKIPPED [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_uva[1024-None] SKIPPED [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_dtype[1-dtype0] SKIPPED [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_dtype[1-dtype1] SKIPPED [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_dtype[4-dtype0] SKIPPED [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_dtype[4-dtype1] SKIPPED [ 91%]
tests/pytorch/test_optim.py::test_sparse_adam_dtype[101-dtype0] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_sparse_adam_dtype[101-dtype1] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_sparse_adam_dtype[1024-dtype0] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_sparse_adam_dtype[1024-dtype1] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_sparse_adam_zero_step SKIPPED (Do ...) [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_cpu_sparse_adam[2] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_cpu_sparse_adam[4] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[nccl-2] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[nccl-4] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[nccl-8] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[gloo-2] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[gloo-4] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[gloo-8] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_cuda_tensor[2] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_cuda_tensor[4] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_cuda_tensor[8] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_cpu_zero_step[2] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_cpu_zero_step[4] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[nccl-2] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[nccl-4] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[nccl-8] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[gloo-2] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[gloo-4] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[gloo-8] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step_cuda_tensor[2] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step_cuda_tensor[4] SKIPPED [ 92%]
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step_cuda_tensor[8] SKIPPED [ 92%]
tests/pytorch/test_pickle.py::test_pickling_batched_graph PASSED [ 92%]
tests/pytorch/test_pin_memory.py::test_pin_noncontiguous SKIPPED (Ne...)
[ 92%]
tests/pytorch/test_pin_memory.py::test_pin_view SKIPPED (Need gpu fo...) [ 92%]
tests/pytorch/test_pin_memory.py::test_unpin_automatically SKIPPED (...) [ 92%]
tests/pytorch/test_pin_memory.py::test_pin_unpin_column SKIPPED (Nee...) [ 92%]
tests/pytorch/test_pin_memory.py::test_pin_empty SKIPPED (Need gpu f...) [ 92%]
tests/pytorch/test_sparse_emb.py::test_multiprocess_sparse_emb_get_set[1] SKIPPED [ 92%]
tests/pytorch/test_sparse_emb.py::test_multiprocess_sparse_emb_get_set[2] SKIPPED [ 92%]
tests/pytorch/test_sparse_emb.py::test_multiprocess_sparse_emb_get_set[3] SKIPPED [ 92%]
tests/pytorch/test_stream.py::test_basics SKIPPED (stream only runs ...) [ 92%]
tests/pytorch/test_stream.py::test_set_get_stream SKIPPED (stream on...) [ 92%]
tests/pytorch/test_stream.py::test_record_stream_ndarray SKIPPED (st...) [ 92%]
tests/pytorch/test_stream.py::test_record_stream_graph_positive SKIPPED [ 92%]
tests/pytorch/test_stream.py::test_record_stream_graph_negative SKIPPED [ 93%]
tests/pytorch/test_unified_tensor.py::test_unified_tensor SKIPPED (D...) [ 93%]
tests/pytorch/test_unified_tensor.py::test_multi_gpu_unified_tensor[1] SKIPPED [ 93%]
tests/pytorch/test_unified_tensor.py::test_multi_gpu_unified_tensor[2] SKIPPED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_diag[None-val_shape0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_diag[None-val_shape1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_diag[mat_shape1-val_shape0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_diag[mat_shape1-val_shape1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_diag[mat_shape2-val_shape0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_diag[mat_shape2-val_shape1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_identity[None-shape0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_identity[None-shape1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_identity[None-shape2] PASSED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_identity[2-shape0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_identity[2-shape1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_diag.py::test_identity[2-shape2] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[add-dtype0-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[add-dtype0-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[add-dtype1-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[add-dtype1-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[sub-dtype0-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[sub-dtype0-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[sub-dtype1-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[sub-dtype1-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[mul-dtype0-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[mul-dtype0-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[mul-dtype1-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[mul-dtype1-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[truediv-dtype0-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[truediv-dtype0-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[truediv-dtype1-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[truediv-dtype1-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[add-dtype0-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[add-dtype0-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[add-dtype1-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[add-dtype1-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[sub-dtype0-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[sub-dtype0-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[sub-dtype1-idtype0] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[sub-dtype1-idtype1] PASSED [ 93%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[mul-dtype0-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[mul-dtype0-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[mul-dtype1-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[mul-dtype1-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[truediv-dtype0-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[truediv-dtype0-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[truediv-dtype1-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[truediv-dtype1-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2-dtype0-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2-dtype0-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2-dtype1-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2-dtype1-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2.5-dtype0-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2.5-dtype0-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2.5-dtype1-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2.5-dtype1-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2-dtype0-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2-dtype0-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2-dtype1-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2-dtype1-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2.5-dtype0-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2.5-dtype0-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2.5-dtype1-idtype0] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2.5-dtype1-idtype1] PASSED [ 94%]
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_expose_op PASSED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col0-row0-None] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col0-row0-2] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col0-row1-None] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col0-row1-2] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col1-row0-None] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col1-row0-2] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col1-row1-None] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col1-row1-2] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col0-row0-None] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col0-row0-2] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col0-row1-None] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col0-row1-2] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col1-row0-None] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col1-row0-2] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col1-row1-None] SKIPPED [ 94%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col1-row1-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col0-row0-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col0-row0-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col0-row1-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col0-row1-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col1-row0-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col1-row0-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col1-row1-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col1-row1-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col0-row0-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col0-row0-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col0-row1-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col0-row1-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col1-row0-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col1-row0-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col1-row1-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col1-row1-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col0-row0-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col0-row0-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col0-row1-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col0-row1-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col1-row0-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col1-row0-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col1-row1-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col1-row1-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col0-row0-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col0-row0-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col0-row1-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col0-row1-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col1-row0-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col1-row0-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col1-row1-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col1-row1-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col0-row0-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col0-row0-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col0-row1-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col0-row1-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col1-row0-None] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col1-row0-2] SKIPPED [ 95%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col1-row1-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col1-row1-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col0-row0-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col0-row0-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col0-row1-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col0-row1-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col1-row0-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col1-row0-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col1-row1-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col1-row1-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col0-row0-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col0-row0-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col0-row1-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col0-row1-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col1-row0-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col1-row0-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col1-row1-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col1-row1-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col0-row0-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col0-row0-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col0-row1-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col0-row1-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col1-row0-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col1-row0-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col1-row1-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col1-row1-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col0-row0-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col0-row0-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col0-row1-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col0-row1-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col1-row0-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col1-row0-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col1-row1-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col1-row1-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col0-row0-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col0-row0-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col0-row1-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col0-row1-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col1-row0-None] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col1-row0-2] SKIPPED [ 96%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col1-row1-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col1-row1-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col0-row0-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col0-row0-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col0-row1-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col0-row1-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col1-row0-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col1-row0-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col1-row1-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col1-row1-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col0-row0-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col0-row0-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col0-row1-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col0-row1-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col1-row0-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col1-row0-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col1-row1-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col1-row1-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col0-row0-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col0-row0-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col0-row1-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col0-row1-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col1-row0-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col1-row0-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col1-row1-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col1-row1-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col0-row0-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col0-row0-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col0-row1-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col0-row1-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col1-row0-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col1-row0-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col1-row1-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col1-row1-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col0-row0-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col0-row0-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col0-row1-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col0-row1-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col1-row0-None] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col1-row0-2] SKIPPED [ 97%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col1-row1-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col1-row1-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col0-row0-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col0-row0-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col0-row1-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col0-row1-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col1-row0-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col1-row0-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col1-row1-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col1-row1-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col0-row0-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col0-row0-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col0-row1-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col0-row1-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col1-row0-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col1-row0-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col1-row1-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col1-row1-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col0-row0-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col0-row0-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col0-row1-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col0-row1-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col1-row0-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col1-row0-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col1-row1-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col1-row1-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col0-row0-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col0-row0-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col0-row1-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col0-row1-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col1-row0-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col1-row0-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col1-row1-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col1-row1-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col0-row0-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col0-row0-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col0-row1-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col0-row1-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col1-row0-None] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col1-row0-2] SKIPPED [ 98%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col1-row1-None] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col1-row1-2] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col0-row0-None] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col0-row0-2] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col0-row1-None] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col0-row1-2] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col1-row0-None] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col1-row0-2] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col1-row1-None] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col1-row1-2] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col0-row0-None] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col0-row0-2] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col0-row1-None] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col0-row1-2] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col1-row0-None] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col1-row0-2] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col1-row1-None] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col1-row1-2] SKIPPED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[None-val_shape0] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[None-val_shape1] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[mat_shape1-val_shape0] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[mat_shape1-val_shape1] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[mat_shape2-val_shape0] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[mat_shape2-val_shape1] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col0-row0-None] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col0-row0-2] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col0-row1-None] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col0-row1-2] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col1-row0-None] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col1-row0-2] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col1-row1-None] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col1-row1-2] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col0-row0-None] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col0-row0-2] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col0-row1-None] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col0-row1-2] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col1-row0-None] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col1-row0-2] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col1-row1-None] PASSED [ 99%]
tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col1-row1-2] PASSED [100%]
================================== FAILURES ===================================
________________ test_knn_cpu[True-euclidean-bruteforce-blas] _________________

algorithm = 'bruteforce-blas', dist = 'euclidean', exclude_self = True

    @pytest.mark.parametrize('algorithm', ['bruteforce-blas', 'bruteforce', 'kd-tree'])
    @pytest.mark.parametrize('dist', ['euclidean', 'cosine'])
    @pytest.mark.parametrize('exclude_self', [False, True])
    def test_knn_cpu(algorithm, dist, exclude_self):
>       _test_knn_common(F.cpu(), algorithm, dist, exclude_self)

tests\pytorch\test_geometry.py:159:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests\pytorch\test_geometry.py:74: in _test_knn_common
    g = kg(x.view(2, 4, 3), algorithm, dist, exclude_self)
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\factory.py:128: in forward
    exclude_self=exclude_self)
python\dgl\transforms\functional.py:248: in knn_graph
    result = remove_self_loop(result)
python\dgl\transforms\functional.py:2032: in remove_self_loop
    new_g = remove_edges(g, self_loop_eids, etype=etype)
python\dgl\transforms\functional.py:1755: in remove_edges
    g.remove_edges(eids, etype=etype, store_ids=store_ids)
python\dgl\heterograph.py:648: in remove_edges
    one_hot_removed_edges, reducer='sum')
python\dgl\ops\segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0.])
offsets = tensor([ 0, 16, 32])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:780: TypeError
___________________ test_knn_cpu[True-euclidean-bruteforce] ___________________

algorithm = 'bruteforce', dist = 'euclidean', exclude_self = True

    @pytest.mark.parametrize('algorithm', ['bruteforce-blas', 'bruteforce', 'kd-tree'])
    @pytest.mark.parametrize('dist', ['euclidean', 'cosine'])
    @pytest.mark.parametrize('exclude_self', [False, True])
    def test_knn_cpu(algorithm, dist, exclude_self):
>       _test_knn_common(F.cpu(), algorithm, dist, exclude_self)

tests\pytorch\test_geometry.py:159:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests\pytorch\test_geometry.py:74: in _test_knn_common
    g = kg(x.view(2, 4, 3), algorithm, dist, exclude_self)
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\factory.py:128: in forward
    exclude_self=exclude_self)
python\dgl\transforms\functional.py:248: in knn_graph
    result = remove_self_loop(result)
python\dgl\transforms\functional.py:2032: in remove_self_loop
    new_g = remove_edges(g, self_loop_eids, etype=etype)
python\dgl\transforms\functional.py:1755: in remove_edges
    g.remove_edges(eids, etype=etype, store_ids=store_ids)
python\dgl\heterograph.py:648: in remove_edges
    one_hot_removed_edges, reducer='sum')
python\dgl\ops\segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 1.])
offsets = tensor([ 0, 16, 32])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:780: TypeError
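NOTE: every failure in this run is the same crash. The call sites in python\dgl\backend\pytorch\sparse.py first cast their inputs with _cast_if_autocast_enabled and then enter `with autocast(enabled=False):`. On this worker (Windows, Python 3.6, an older torch), `autocast` is evidently bound to a zero-argument null context named `empty_context`, so the `enabled=False` keyword raises the TypeError. A minimal, self-contained reproduction (only the name `empty_context` is taken from the traceback; the rest is illustrative):

    from contextlib import contextmanager

    @contextmanager
    def empty_context():
        # Zero-argument null context, matching the signature the traceback implies.
        yield

    autocast = empty_context  # old-torch fallback: autocast aliased to the null context

    try:
        with autocast(enabled=False):  # the call sites always pass enabled=False
            pass
    except TypeError as err:
        print(err)  # -> empty_context() got an unexpected keyword argument 'enabled'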
____________________ test_knn_cpu[True-euclidean-kd-tree] _____________________

algorithm = 'kd-tree', dist = 'euclidean', exclude_self = True

    @pytest.mark.parametrize('algorithm', ['bruteforce-blas', 'bruteforce', 'kd-tree'])
    @pytest.mark.parametrize('dist', ['euclidean', 'cosine'])
    @pytest.mark.parametrize('exclude_self', [False, True])
    def test_knn_cpu(algorithm, dist, exclude_self):
>       _test_knn_common(F.cpu(), algorithm, dist, exclude_self)

tests\pytorch\test_geometry.py:159:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests\pytorch\test_geometry.py:74: in _test_knn_common
    g = kg(x.view(2, 4, 3), algorithm, dist, exclude_self)
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\factory.py:128: in forward
    exclude_self=exclude_self)
python\dgl\transforms\functional.py:248: in knn_graph
    result = remove_self_loop(result)
python\dgl\transforms\functional.py:2032: in remove_self_loop
    new_g = remove_edges(g, self_loop_eids, etype=etype)
python\dgl\transforms\functional.py:1755: in remove_edges
    g.remove_edges(eids, etype=etype, store_ids=store_ids)
python\dgl\heterograph.py:648: in remove_edges
    one_hot_removed_edges, reducer='sum')
python\dgl\ops\segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0.])
offsets = tensor([ 0, 16, 32])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:780: TypeError
__________________ test_knn_cpu[True-cosine-bruteforce-blas] __________________

algorithm = 'bruteforce-blas', dist = 'cosine', exclude_self = True

    @pytest.mark.parametrize('algorithm', ['bruteforce-blas', 'bruteforce', 'kd-tree'])
    @pytest.mark.parametrize('dist', ['euclidean', 'cosine'])
    @pytest.mark.parametrize('exclude_self', [False, True])
    def test_knn_cpu(algorithm, dist, exclude_self):
>       _test_knn_common(F.cpu(), algorithm, dist, exclude_self)

tests\pytorch\test_geometry.py:159:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests\pytorch\test_geometry.py:74: in _test_knn_common
    g = kg(x.view(2, 4, 3), algorithm, dist, exclude_self)
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\factory.py:128: in forward
    exclude_self=exclude_self)
python\dgl\transforms\functional.py:248: in knn_graph
    result = remove_self_loop(result)
python\dgl\transforms\functional.py:2032: in remove_self_loop
    new_g = remove_edges(g, self_loop_eids, etype=etype)
python\dgl\transforms\functional.py:1755: in remove_edges
    g.remove_edges(eids, etype=etype, store_ids=store_ids)
python\dgl\heterograph.py:648: in remove_edges
    one_hot_removed_edges, reducer='sum')
python\dgl\ops\segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0.])
offsets = tensor([ 0, 16, 32])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:780: TypeError
____________________ test_knn_cpu[True-cosine-bruteforce] _____________________

algorithm = 'bruteforce', dist = 'cosine', exclude_self = True

    @pytest.mark.parametrize('algorithm', ['bruteforce-blas', 'bruteforce', 'kd-tree'])
    @pytest.mark.parametrize('dist', ['euclidean', 'cosine'])
    @pytest.mark.parametrize('exclude_self', [False, True])
    def test_knn_cpu(algorithm, dist, exclude_self):
>       _test_knn_common(F.cpu(), algorithm, dist, exclude_self)

tests\pytorch\test_geometry.py:159:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests\pytorch\test_geometry.py:74: in _test_knn_common
    g = kg(x.view(2, 4, 3), algorithm, dist, exclude_self)
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\factory.py:128: in forward
    exclude_self=exclude_self)
python\dgl\transforms\functional.py:248: in knn_graph
    result = remove_self_loop(result)
python\dgl\transforms\functional.py:2032: in remove_self_loop
    new_g = remove_edges(g, self_loop_eids, etype=etype)
python\dgl\transforms\functional.py:1755: in remove_edges
    g.remove_edges(eids, etype=etype, store_ids=store_ids)
python\dgl\heterograph.py:648: in remove_edges
    one_hot_removed_edges, reducer='sum')
python\dgl\ops\segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 1.])
offsets = tensor([ 0, 16, 32])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:780: TypeError
______________________ test_knn_cpu[True-cosine-kd-tree] ______________________

algorithm = 'kd-tree', dist = 'cosine', exclude_self = True

    @pytest.mark.parametrize('algorithm', ['bruteforce-blas', 'bruteforce', 'kd-tree'])
    @pytest.mark.parametrize('dist', ['euclidean', 'cosine'])
    @pytest.mark.parametrize('exclude_self', [False, True])
    def test_knn_cpu(algorithm, dist, exclude_self):
>       _test_knn_common(F.cpu(), algorithm, dist, exclude_self)

tests\pytorch\test_geometry.py:159:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests\pytorch\test_geometry.py:74: in _test_knn_common
    g = kg(x.view(2, 4, 3), algorithm, dist, exclude_self)
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\factory.py:128: in forward
    exclude_self=exclude_self)
python\dgl\transforms\functional.py:248: in knn_graph
    result = remove_self_loop(result)
python\dgl\transforms\functional.py:2032: in remove_self_loop
    new_g = remove_edges(g, self_loop_eids, etype=etype)
python\dgl\transforms\functional.py:1755: in remove_edges
    g.remove_edges(eids, etype=etype, store_ids=store_ids)
python\dgl\heterograph.py:648: in remove_edges
    one_hot_removed_edges, reducer='sum')
python\dgl\ops\segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0.])
offsets = tensor([ 0, 16, 32])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:780: TypeError
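NOTE: all six test_knn_cpu variants funnel into segment_reduce (sparse.py:780). Since the null context exists only to be a no-op, the smallest repair is to let it swallow whatever arguments the real autocast accepts; a sketch of that idea (one plausible shape for the fix, not necessarily the actual diff in this PR):

    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):
        # Accept and ignore autocast-style arguments such as enabled=False so
        # `with autocast(enabled=False):` works whether autocast is the real
        # torch context manager or this stand-in.
        yield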
_____________________________ test_graph_conv0[1] _____________________________

out_dim = 1

    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv0(out_dim):
        g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
        ctx = F.ctx()
        adj = g.adjacency_matrix(transpose=True, ctx=ctx)
        conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
        conv = conv.to(ctx)
        print(conv)
        # test pickle
        th.save(conv, tmp_buffer)
        # test#1: basic
        h0 = F.ones((3, 5))
>       h1 = conv(g, h0)

tests\pytorch\test_nn.py:39:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[0.9117], [0.9117], [0.9117]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
---------------------------- Captured stdout call -----------------------------
GraphConv(in=5, out=1, normalization=none, activation=None)
_____________________________ test_graph_conv0[2] _____________________________

out_dim = 2

    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv0(out_dim):
        g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
        ctx = F.ctx()
        adj = g.adjacency_matrix(transpose=True, ctx=ctx)
        conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
        conv = conv.to(ctx)
        print(conv)
        # test pickle
        th.save(conv, tmp_buffer)
        # test#1: basic
        h0 = F.ones((3, 5))
>       h1 = conv(g, h0)

tests\pytorch\test_nn.py:39:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.2014, -1.7951], [-0.2014, -1.7951], [-0.2014, -1.7951]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
---------------------------- Captured stdout call -----------------------------
GraphConv(in=5, out=2, normalization=none, activation=None)
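NOTE: the test_graph_conv failures below reach the same guard through a second call site, gspmm at sparse.py:720, so message passing fails as well as segment reduction. An alternative repair is to keep the guard but pick an `autocast` whose signature matches the call. Sketched against public torch APIs only (torch.cuda.amp.autocast has accepted `enabled=` since torch 1.6; whether the PR does exactly this is not visible in this log):

    from contextlib import contextmanager

    try:
        from torch.cuda.amp import autocast  # real context manager, takes enabled=
    except ImportError:
        @contextmanager
        def autocast(enabled=True):
            # No-op stand-in exposing the same keyword as the real autocast.
            yield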
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-none-g0-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0147], [-1.4779]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-none-g1-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.3774], [-0.4557], [-0.3917], [ 0.7560], [-1.0727], [-1.1545], [ 0.1115], [ 0.3102], [-0.1478], [ 2.0557]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-none-g1-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4357], [-0.3045], [ 0.5054], [ 0.1024], [ 0.9517], [ 0.2717], [ 0.0886], [-0.6465], [-0.0434], [ 1.2168]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-none-g2-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.2488], [ 0.2138], [ 1.1414], [ 1.1984], [ 0.4325], [-1.0267], [ 0.7766], [-0.4516], [-0.4094], [ 1.3725]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-none-g2-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.5254], [ 1.0542], [ 1.3021], [ 1.0440], [ 0.1947], [-0.8581], [-0.5537], [-1.6341], [ 0.6213], [ 1.5097]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-none-g3-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.5068], [-0.3208], [ 0.6224], [ 0.3713], [-1.5798], [-0.1363], [ 1.8579], [ 0.7797], [ 0.1811], [ 1.0436]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
____________ test_graph_conv: 27 further failing parametrizations _____________

Each of the following cases fails at the same point, python\dgl\backend\pytorch\sparse.py:720,
with the same TypeError (empty_context() got an unexpected keyword argument 'enabled');
only the graph fixture, the index dtype, and the random input tensors differ:

    test_graph_conv[1-True-True-none-g3-idtype1]
    test_graph_conv[1-True-True-none-g4-idtype0]
    test_graph_conv[1-True-True-none-g4-idtype1]
    test_graph_conv[1-True-True-none-g5-idtype0]
    test_graph_conv[1-True-True-none-g5-idtype1]
    test_graph_conv[1-True-True-none-g6-idtype0]
    test_graph_conv[1-True-True-none-g6-idtype1]
    test_graph_conv[1-True-True-none-g7-idtype0]
    test_graph_conv[1-True-True-none-g7-idtype1]
    test_graph_conv[1-True-True-both-g0-idtype0]
    test_graph_conv[1-True-True-both-g0-idtype1]
    test_graph_conv[1-True-True-both-g1-idtype0]
    test_graph_conv[1-True-True-both-g1-idtype1]
    test_graph_conv[1-True-True-both-g2-idtype0]
    test_graph_conv[1-True-True-both-g2-idtype1]
    test_graph_conv[1-True-True-both-g3-idtype0]
    test_graph_conv[1-True-True-both-g3-idtype1]
    test_graph_conv[1-True-True-both-g4-idtype0]
    test_graph_conv[1-True-True-both-g4-idtype1]
    test_graph_conv[1-True-True-both-g5-idtype0]
    test_graph_conv[1-True-True-both-g5-idtype1]
    test_graph_conv[1-True-True-both-g6-idtype0]
    test_graph_conv[1-True-True-both-g6-idtype1]
    test_graph_conv[1-True-True-both-g7-idtype0]
    test_graph_conv[1-True-True-both-g7-idtype1]
    test_graph_conv[1-True-True-right-g0-idtype0]
    test_graph_conv[1-True-True-right-g0-idtype1]
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g1-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.2542], [-0.6458], [-1.2538], [ 2.2001], [ 1.9990], [-0.2682], [-2.3272], [-1.5221], [-0.6127], [ 1.5270]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.7344], [-0.9932], [-1.4484], [ 1.4421], [-0.6340], [ 1.3776], [-1.5206], [-0.0028], [ 0.8921], [-1.9648]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5211], [ 1.3808], [-1.2648], [ 1.2036], [-3.3805], [ 0.1068], [-0.4450], [-0.9805], [-1.5781], [-1.9320]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4215], [-2.6242], [-0.2604], [ 0.0904], [ 1.3921], [-2.3748], [ 0.5906], [-0.6220], [ 0.0819], [ 0.4416]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7173], [-0.6420], [-0.0549], [-0.2515], [ 0.4849], [ 1.5220], [ 0.9913], [ 0.4167], [-1.2981], [-0.3690]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2959], [-2.0179], [-1.1616], [ 0.7495], [ 0.3636], [ 0.2828], [ 3.2298], [ 0.9372], [ 0.6765], [-1.0290]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g4-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.6934], [-0.1125], [ 1.8192], [-2.3530], [ 2.2126], [-0.5675], [-0.8449], [-1.0943], [-0.1088]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g4-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7966], [-1.1554], [-0.4580], [ 1.2094], [-1.8406], [-0.1825], [-0.1467], [ 0.8038], [ 0.8419]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g5-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-4.2496], [ 0.0835], [ 0.6602], [-0.6661], [-1.5652], [-0.8721], [ 1.6695], [-0.3479], [-0.2666], [-0.5852]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3945], [-1.3850], [ 0.3137], [-0.5863], [-1.7720], [-1.2231], [ 2.0397], [ 0.0272], [ 2.1933], [-1.4832]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.9290], [ 1.1920], [ 0.9226], [ 0.6478], [-1.2884], [-0.6153]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.0325], [-2.2404], [ 0.2782], [ 0.2156], [-1.1665], [ 1.7703]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1675], [-0.5347], [ 0.5502], [-2.0449], [ 0.7123], [ 1.2910], [ 1.5387], [ 1.6971], [ 1.3014], [ 0.0460]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-right-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.3587e+00], [-1.3794e-01], [ 8.6305e-01], [-8.3138e-01], [ 1.2370e-01], ...00], [ 1.6678e-01], [ 8.9375e-01], [ 2.7686e-04], [ 8.4640e-01]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-left-g0-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'left', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2470], [-0.2297]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-left-g0-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'left', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0680], [ 0.0469]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-left-g1-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'left', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.4420], [-0.3754], [ 0.6135], [-0.6264], [-1.8864], [-1.2964], [ 0.1367], [ 0.7154], [-0.1115], [-0.1078]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-left-g1-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'left', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.8868], [-0.8612], [ 0.3331], [-0.4362], [-1.7782], [ 0.3539], [-0.8969], [ 1.0638], [ 1.8934], [ 0.8213]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-left-g2-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1859], [-0.9136], [-0.4591], [-0.4074], [-0.2436], [-0.9790], [ 0.6441], [ 2.3218], [ 1.6560], [-0.8347]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-left-g2-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3226], [ 0.3242], [ 0.3617], [-0.0668], [ 1.5841], [-0.8224], [-0.0996], [ 0.0072], [ 1.0775], [ 0.9886]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-left-g3-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'left', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2247], [-0.6381], [-0.2281], [-0.2151], [ 3.4745], [ 0.8205], [-0.1342], [-3.0598], [-2.3273], [ 1.5261]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-True-left-g3-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'left', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0523], [ 0.5282], [-0.2292], [-0.1588], [ 1.7005], [-0.1604], [-0.7490], [ 0.2056], [ 0.5445], [-2.7731]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
________________ test_graph_conv[1-True-True-left-g4-idtype0] _________________
idtype = torch.int32, g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'left', weight = True, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-True-left-g4-idtype1] _________________
idtype = torch.int64, g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'left', weight = True, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-True-left-g5-idtype0] _________________
idtype = torch.int32, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'left', weight = True, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-True-left-g5-idtype1] _________________
idtype = torch.int64, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'left', weight = True, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-True-left-g6-idtype0] _________________
idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), norm = 'left', weight = True, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-True-left-g6-idtype1] _________________
idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), norm = 'left', weight = True, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-True-left-g7-idtype0] _________________
idtype = torch.int32, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'left', weight = True, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-True-left-g7-idtype1] _________________
idtype = torch.int64, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'left', weight = True, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g0-idtype0] ________________
idtype = torch.int32, g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g0-idtype1] ________________
idtype = torch.int64, g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g1-idtype0] ________________
idtype = torch.int32, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g1-idtype1] ________________
idtype = torch.int64, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g2-idtype0] ________________
idtype = torch.int32, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g2-idtype1] ________________
idtype = torch.int64, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g3-idtype0] ________________
idtype = torch.int32, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g3-idtype1] ________________
idtype = torch.int64, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g4-idtype0] ________________
idtype = torch.int32, g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g4-idtype1] ________________
idtype = torch.int64, g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g5-idtype0] ________________
idtype = torch.int32, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g5-idtype1] ________________
idtype = torch.int64, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g6-idtype0] ________________
idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g6-idtype1] ________________
idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g7-idtype0] ________________
idtype = torch.int32, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-none-g7-idtype1] ________________
idtype = torch.int64, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}), norm = 'none', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-both-g0-idtype0] ________________
idtype = torch.int32, g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]), norm = 'both', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-both-g0-idtype1] ________________
idtype = torch.int64, g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]), norm = 'both', weight = False, bias = True, out_dim = 1
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

________________ test_graph_conv[1-True-False-both-g1-idtype0] ________________
idtype = torch.int32, g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}), norm = 'both', weight = False, bias = True, out_dim = 1

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-3.3297], [-2.7886], [-0.5809], [-1.9665], [ 2.5598], [ 2.1481], [-4.3904], [-2.7997], [-0.1768], [-4.1974]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4439], [ 1.3954], [ 0.1539], [ 1.2657], [ 0.8821], [-0.5206], [-0.3020], [ 1.2846], [ 1.8222], [ 1.1358]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.4231], [ 0.6381], [ 0.3237], [ 0.4363], [ 0.2932], [ 1.1918], [ 0.0766], [ 1.7217], [-0.7948], [ 1.6200]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.0137], [ 0.1824], [-0.2498], [-0.8405], [ 0.3014], [ 1.3871], [ 0.1037], [-2.2916], [ 0.9056], [ 1.1520]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3257], [ 1.3336], [ 1.2931], [-1.7025], [-3.4846], [ 0.6389], [-0.3032], [-2.0030], [ 2.8290], [ 2.8781]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g4-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.7021], [-0.1376], [ 3.4731], [ 2.9127], [-2.6180], [ 0.1374], [-0.3608], [ 0.6077], [-2.8880]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g4-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1994], [-0.9301], [ 0.4283], [-1.5556], [ 1.1317], [ 2.6270], [-2.5347], [-1.1605], [ 1.2988]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g5-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.4245], [ 0.9392], [-1.6195], [ 0.1446], [ 4.7796], [-1.9656], [ 1.5621], [-7.6928], [-4.4500], [ 4.7836]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1779], [-0.3265], [-0.4248], [ 2.1647], [ 1.3665], [-0.5303], [ 1.1173], [ 3.4854], [-4.3648], [-3.4345]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8516], [-0.4969], [ 0.5359], [-0.9045], [ 0.0444], [-0.1248]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 3.4533], [ 2.8339], [ 1.0259], [-0.5919], [-0.6034], [ 0.9965]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 3.1756], [ 2.7523], [-0.8097], [-0.8420], [-6.4366], [ 2.2159], [ 6.4790], [ 1.0936], [-3.4727], [ 5.5505]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-True-False-both-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3269], [-0.5033], [ 0.1339], [ 0.8003], [ 1.7074], [-0.3194], [-0.5959], [ 0.0968], [ 0.7704], [-1.6596]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-True-False-right-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 5.9925], [-3.5196]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-True-False-right-g0-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.1799], [ 0.2628]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-True-False-right-g1-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 3.3754], [ 3.0911], [ 0.1195], [ 0.1073], [ 1.6961], [-1.3906], [ 1.4328], [ 2.7308], [-2.5521], [-1.4171]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-True-False-right-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 4.1112], [ 0.1671], [-0.8932], [-2.9881], [ 1.7521], [ 2.3557], [ 2.5294], [-3.1311], [-3.9932], [-0.0674]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-True-False-right-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.2047], [ 2.4570], [-5.2055], [-0.7385], [ 3.3523], [ 0.4935], [-0.8638], [ 0.5782], [ 2.3556], [ 0.8004]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-True-False-right-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.7345], [-0.4289], [-2.5716], [-3.3592], [-0.6372], [ 0.7610], [ 0.3459], [ 2.3952], [ 0.6428], [ 0.2351]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-True-False-right-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.6866], [-0.4350], [-0.6574], [-1.2182], [-2.8592], [ 1.7199], [-3.1307], [-2.7521], [ 1.0095], [-2.5968]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-True-False-right-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.1535], [ 1.9413], [-1.2240], [ 0.2276], [ 6.5071], [-3.7753], [-2.8655], [ 7.9541], [-4.9124], [ 0.4349]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
[The identical traceback, ending in the same TypeError at
python\dgl\backend\pytorch\sparse.py:720, repeats for the following 26
test_graph_conv parametrizations, differing only in the parametrized graph,
idtype, which branch of the test raises (tests\pytorch\test_nn.py:97 or :99),
and the random input tensors:]

test_graph_conv[1-True-False-right-g4-idtype0]
test_graph_conv[1-True-False-right-g4-idtype1]
test_graph_conv[1-True-False-right-g5-idtype0]
test_graph_conv[1-True-False-right-g5-idtype1]
test_graph_conv[1-True-False-right-g6-idtype0]
test_graph_conv[1-True-False-right-g6-idtype1]
test_graph_conv[1-True-False-right-g7-idtype0]
test_graph_conv[1-True-False-right-g7-idtype1]
test_graph_conv[1-True-False-left-g0-idtype0]
test_graph_conv[1-True-False-left-g0-idtype1]
test_graph_conv[1-True-False-left-g1-idtype0]
test_graph_conv[1-True-False-left-g1-idtype1]
test_graph_conv[1-True-False-left-g2-idtype0]
test_graph_conv[1-True-False-left-g2-idtype1]
test_graph_conv[1-True-False-left-g3-idtype0]
test_graph_conv[1-True-False-left-g3-idtype1]
test_graph_conv[1-True-False-left-g4-idtype0]
test_graph_conv[1-True-False-left-g4-idtype1]
test_graph_conv[1-True-False-left-g5-idtype0]
test_graph_conv[1-True-False-left-g5-idtype1]
test_graph_conv[1-True-False-left-g6-idtype0]
test_graph_conv[1-True-False-left-g6-idtype1]
test_graph_conv[1-True-False-left-g7-idtype0]
test_graph_conv[1-True-False-left-g7-idtype1]
test_graph_conv[1-False-True-none-g0-idtype0]
test_graph_conv[1-False-True-none-g0-idtype1]
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g1-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1645], [ 0.0848], [ 0.2466], [-1.3641], [ 0.1756], [ 0.1557], [-4.0597], [-0.6340], [-0.3387], [-0.1986]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.4090], [ 0.0678], [-2.1399], [ 0.2398], [-0.7762], [-1.5344], [-0.7439], [ 0.1899], [ 0.8835], [-0.6220]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0891], [ 2.2365], [-0.4261], [-1.1589], [-0.1985], [ 2.1459], [-4.7127], [-1.2597], [ 2.1622], [-0.0168]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.1519], [ 0.9184], [-2.2148], [-1.3970], [-1.8682], [-1.0715], [-1.4028], [-0.4557], [-0.6026], [ 0.1913]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.7931], [ 2.6273], [ 0.4962], [ 1.8085], [ 1.7751], [-0.1606], [-1.8477], [ 1.4651], [ 1.4120], [ 0.3117]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5574], [-0.0413], [-0.0748], [ 0.5882], [-2.0757], [-0.3730], [-1.0024], [ 1.0030], [ 1.4917], [ 1.6682]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g4-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1926], [-0.4264], [ 1.0866], [ 1.5811], [ 0.1066], [ 0.7125], [ 1.7494], [ 1.3982], [-0.8108]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g4-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.5678], [ 3.2388], [ 0.8013], [-0.8872], [-0.4117], [-0.1659], [-0.1865], [-1.3458], [-0.5751]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g5-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3404], [-2.0442], [ 1.3129], [ 0.6260], [ 0.1636], [ 0.8803], [-0.0936], [-0.3224], [ 0.4327], [-0.2761]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3253], [ 1.1468], [ 0.7463], [ 1.6034], [-0.1102], [-0.7595], [-1.2077], [-2.6908], [ 0.2797], [ 0.7313]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1022], [-0.6165], [-0.1433], [-2.4645], [ 0.1254], [-2.7441]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.0103], [ 1.5464], [-1.5818], [ 1.9563], [ 1.3320], [ 0.8648]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2352], [-1.8008], [-1.0306], [ 1.7420], [-0.6986], [ 0.7388], [-1.4930], [-1.0122], [-0.0268], [-1.4054]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-none-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.6487], [ 0.0650], [ 0.5975], [ 1.2566], [ 2.1387], [ 0.2407], [-0.9502], [ 0.2135], [ 1.4931], [ 1.1156]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-both-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.1360], [0.0013]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-both-g0-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1411], [-0.0186]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-both-g1-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.5606], [ 0.9242], [-0.0739], [ 0.6071], [ 0.4358], [-0.2436], [ 0.3397], [-1.0675], [-0.2124], [ 0.1771]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-both-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.8423], [-0.3107], [ 0.3239], [-0.4577], [ 2.7726], [ 0.2495], [-1.5075], [-1.5133], [ 0.9681], [-3.7836]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-both-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6221], [ 0.4521], [-0.5972], [ 0.6848], [-0.4029], [ 0.5742], [ 0.4854], [-0.2073], [ 1.4666], [ 0.3348]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-both-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.1209], [-0.3748], [ 0.7825], [-2.0360], [-0.8154], [-0.1998], [-0.1477], [-1.5384], [-0.2266], [-4.0868]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-both-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.0512], [-0.3121], [ 1.0154], [ 0.0440], [-0.4233], [-2.1993], [-0.6919], [-1.9257], [-0.1916], [ 0.7977]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-both-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2518], [ 0.0537], [ 1.0431], [-0.3329], [-1.2088], [ 1.5997], [ 2.7069], [-0.8020], [-1.7164], [-0.2705]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The identical assertion and traceback repeat verbatim for the following 27
parametrizations of test_graph_conv; only the graph fixture, idtype, and the
random lhs_data tensor differ:

test_graph_conv[1-False-True-both-g4-idtype0]
test_graph_conv[1-False-True-both-g4-idtype1]
test_graph_conv[1-False-True-both-g5-idtype0]
test_graph_conv[1-False-True-both-g5-idtype1]
test_graph_conv[1-False-True-both-g6-idtype0]
test_graph_conv[1-False-True-both-g6-idtype1]
test_graph_conv[1-False-True-both-g7-idtype0]
test_graph_conv[1-False-True-both-g7-idtype1]
test_graph_conv[1-False-True-right-g0-idtype0]
test_graph_conv[1-False-True-right-g0-idtype1]
test_graph_conv[1-False-True-right-g1-idtype0]
test_graph_conv[1-False-True-right-g1-idtype1]
test_graph_conv[1-False-True-right-g2-idtype0]
test_graph_conv[1-False-True-right-g2-idtype1]
test_graph_conv[1-False-True-right-g3-idtype0]
test_graph_conv[1-False-True-right-g3-idtype1]
test_graph_conv[1-False-True-right-g4-idtype0]
test_graph_conv[1-False-True-right-g4-idtype1]
test_graph_conv[1-False-True-right-g5-idtype0]
test_graph_conv[1-False-True-right-g5-idtype1]
test_graph_conv[1-False-True-right-g6-idtype0]
test_graph_conv[1-False-True-right-g6-idtype1]
test_graph_conv[1-False-True-right-g7-idtype0]
test_graph_conv[1-False-True-right-g7-idtype1]
test_graph_conv[1-False-True-left-g0-idtype0]
test_graph_conv[1-False-True-left-g0-idtype1]
test_graph_conv[1-False-True-left-g1-idtype0]
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.0493], [-0.0583], [-0.0821], [-0.1054], [-0.1820], [ 0.2335], [-0.4249], [ 0.0952], [ 0.7118], [ 0.7998]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3018], [-0.2125], [ 0.7850], [ 0.5458], [ 0.7459], [-1.1136], [ 0.5814], [ 2.0761], [ 0.6062], [ 2.4590]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1394], [-0.0749], [ 0.4449], [ 0.1151], [-0.4869], [ 0.3282], [-0.2145], [-0.2971], [ 0.6185], [-0.1619]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2085], [ 0.4731], [ 0.2680], [-0.2533], [ 1.7207], [-0.4825], [ 0.4298], [ 0.4842], [ 1.6548], [-0.1591]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.7822], [-0.2133], [ 0.0517], [ 0.6987], [-0.1314], [ 1.6082], [ 0.4153], [ 0.8002], [ 2.3445], [ 0.8257]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g4-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2602], [ 0.0735], [ 0.0876], [-0.3472], [-0.2682], [ 0.0468], [ 0.4900], [ 0.1405], [-0.6808]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g4-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.5235], [-1.3580], [-1.8963], [ 0.9715], [ 1.0745], [ 0.5673], [ 0.7357], [-0.9971], [-1.5654]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g5-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1855], [-0.7748], [ 0.1910], [ 0.3177], [ 0.8657], [-0.4116], [ 0.9477], [-1.2624], [-1.3538], [ 0.5417]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5144], [ 0.3216], [ 0.0543], [ 0.0273], [-1.1756], [-0.7328], [-0.0448], [-1.1289], [ 0.9796], [ 0.6761]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0050], [-0.2580], [-0.9398], [ 0.2937], [-1.2709], [ 0.4975]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.6143], [ 1.5721], [-1.1196], [-2.5264], [ 0.1103], [ 0.7467]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.3258], [ 0.3560], [ 0.9492], [-0.9855], [ 0.7863], [-0.9705], [ 1.3883], [ 1.3993], [ 1.3731], [-0.2480]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[1-False-True-left-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2886], [ 0.0457], [ 0.0378], [ 0.0595], [ 0.0489], [-1.8677], [ 0.8013], [-3.1749], [ 0.1628], [-1.1777]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
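The next block of failures exercises the other branch of the test body: with weight=False the GraphConv module is constructed without an internal weight matrix and one is supplied at call time. A minimal usage sketch of the two call styles the test exercises (assuming a working DGL/PyTorch install; the small cycle graph is illustrative, chosen so no node has zero in-degree and GraphConv's zero-in-degree check does not trigger):

    import torch
    import dgl
    from dgl.nn.pytorch import GraphConv

    # A 4-node cycle: every node has in-degree 1.
    g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
    h = torch.randn(g.number_of_src_nodes(), 5)

    # weight=True: the layer owns its projection matrix.
    conv = GraphConv(5, 2, norm='left', weight=True, bias=False)
    h_out = conv(g, h)                       # the call failing above at test_nn.py:97

    # weight=False: no internal weight; pass one explicitly at call time.
    conv_nw = GraphConv(5, 2, norm='none', weight=False, bias=False)
    ext_w = torch.randn(5, 2)
    h_out_ext = conv_nw(g, h, weight=ext_w)  # the call failing below at test_nn.py:99

    print(h_out.shape, h_out_ext.shape)      # torch.Size([4, 2]) torch.Size([4, 2])

Both call styles funnel into the same gspmm kernel, which is why both branches hit the identical autocast TypeError.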
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-none-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.6256], [ 0.0127]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-none-g0-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.2338], [ 1.8069]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-none-g1-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5880], [ 2.4978], [-1.4408], [-0.1207], [-1.6998], [-0.9828], [ 0.2598], [ 2.5164], [-2.3908], [ 1.8766]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-none-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-3.8280], [ 5.8569], [ 1.2777], [ 7.4900], [-1.2450], [ 0.3720], [-4.7981], [ 1.1046], [-8.3443], [-5.1565]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-none-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.1779], [ 1.3015], [-1.4283], [ 0.9584], [-0.5161], [ 0.0413], [-0.9351], [ 0.4656], [ 0.4113], [ 0.8690]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-none-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6716], [ 0.5132], [ 0.1603], [ 0.3683], [ 1.3922], [-1.5251], [-2.7796], [ 0.8821], [ 1.6174], [-0.2422]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-none-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3135], [-0.1689], [ 2.0452], [ 0.2620], [ 2.2767], [ 1.1864], [-1.6842], [-1.0519], [ 2.5822], [-0.5491]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-none-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.9692], [ 1.6751], [-1.1200], [-1.6243], [-0.7863], [ 3.0730], [ 0.3680], [ 3.7776], [ 2.0418], [-3.3161]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-none-g4-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.9026], [-0.6367], [-0.2877], [-0.9867], [-0.1171], [-0.2924], [ 0.0233], [ 0.0684], [-0.0197]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The same TypeError is raised for every remaining parametrization of
test_graph_conv in this group; only the test id, the graph under test, and the
randomly generated lhs_data tensor differ. Each case fails with the traceback
shown above, ending in:

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError

Failing parametrizations (each with weight=False, bias=False, out_dim=1):
  test_graph_conv[1-False-False-none-g4-idtype1]     Graph(num_nodes=9, num_edges=15)
  test_graph_conv[1-False-False-none-g5-idtype0]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-none-g5-idtype1]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-none-g6-idtype0]     Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
  test_graph_conv[1-False-False-none-g6-idtype1]     Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
  test_graph_conv[1-False-False-none-g7-idtype0]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-none-g7-idtype1]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-both-g0-idtype0]     Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8})
  test_graph_conv[1-False-False-both-g0-idtype1]     Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8})
  test_graph_conv[1-False-False-both-g1-idtype0]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-both-g1-idtype1]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-both-g2-idtype0]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-both-g2-idtype1]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-both-g3-idtype0]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-both-g3-idtype1]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-both-g4-idtype0]     Graph(num_nodes=9, num_edges=15)
  test_graph_conv[1-False-False-both-g4-idtype1]     Graph(num_nodes=9, num_edges=15)
  test_graph_conv[1-False-False-both-g5-idtype0]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-both-g5-idtype1]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-both-g6-idtype0]     Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
  test_graph_conv[1-False-False-both-g6-idtype1]     Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
  test_graph_conv[1-False-False-both-g7-idtype0]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-both-g7-idtype1]     Graph(num_nodes=10, num_edges=17)
  test_graph_conv[1-False-False-right-g0-idtype0]    Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8})
  test_graph_conv[1-False-False-right-g0-idtype1]    Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8})
  test_graph_conv[1-False-False-right-g1-idtype0]    Graph(num_nodes=10, num_edges=17)
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g1-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4009], [-1.0127], [-0.4741], [-0.6988], [ 2.2464], [-1.7625], [ 0.4454], [ 0.6608], [ 1.4734], [ 1.4432]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g2-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-4.2680], [-3.7542], [-4.7500], [-2.7509], [ 5.7743], [-0.8923], [ 3.3396], [-1.0490], [-1.6268], [-2.2152]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g2-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.5796], [ 1.6930], [-2.3008], [ 0.3052], [ 3.7987], [ 3.1215], [-3.9673], [-4.6248], [-1.6991], [ 0.7200]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g3-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 3.5131e+00], [-3.7254e+00], [-1.2404e+00], [ 2.1429e-03], [-3.8855e+00], [-1.1975e+00], [-3.3572e+00], [-2.8301e+00], [-1.4235e+00], [ 3.7847e+00]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g3-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 3.8195], [ 0.0949], [-0.5304], [ 1.0590], [ 5.3441], [ 1.3328], [-0.5215], [-5.4427], [-5.7357], [ 0.8810]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g4-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6542], [-1.1747], [ 0.8104], [ 3.0780], [ 0.5219], [ 0.4924], [ 0.2044], [ 1.1309], [ 1.7793]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g4-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.2994], [ 3.1315], [-3.5103], [ 3.1810], [ 1.1301], [-1.7526], [-2.4679], [-0.0324], [ 0.6423]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g5-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6380], [ 2.5675], [-0.0847], [ 0.8192], [-4.7353], [-0.4515], [ 1.8456], [ 1.8532], [ 2.5695], [-5.2477]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g5-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7983], [ 0.3627], [ 0.0982], [-3.2020], [ 0.7554], [ 2.3019], [-3.0868], [-4.8390], [ 0.1866], [ 0.7385]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g6-idtype0] _______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.7939], [ 0.5944], [-0.4132], [-1.0590], [ 0.8973], [-1.8916]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g6-idtype1] _______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.5444e+00], [-2.4221e-02], [ 1.7397e+00], [-1.0839e-03], [-9.5440e-01], [ 2.0388e-01]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g7-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3730], [ 4.8663], [ 0.3712], [-5.1312], [ 0.1763], [-0.9011], [ 0.7495], [ 2.1125], [ 1.6965], [ 0.6256]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-right-g7-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3824], [-0.0871], [-0.5881], [ 2.2911], [ 0.5024], [-0.7644], [ 1.2086], [-0.8979], [ 0.0146], [ 0.0437]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1921], [-0.4517]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g0-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0054], [ 0.5092]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g1-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.0680], [ 0.7428], [ 1.1853], [-1.2889], [ 2.0872], [ 0.2683], [-0.1059], [-3.1978], [-2.6572], [-1.0698]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5566], [ 1.7554], [ 0.4849], [-0.6584], [ 0.2932], [ 2.4827], [ 1.3432], [-2.1189], [-2.2858], [ 0.0653]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0249], [-0.1851], [ 0.3530], [ 0.5434], [-1.5023], [ 1.1959], [-0.6451], [-3.4866], [-1.0649], [ 3.6353]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3193], [-1.3189], [-0.8487], [-0.8350], [ 2.6867], [-0.3911], [ 1.9851], [-0.4855], [-0.8925], [-0.6102]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0219], [ 0.5216], [ 0.1647], [-0.2298], [-1.8173], [ 0.2477], [ 0.3092], [-0.5891], [-0.1341], [-1.6789]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4430], [ 0.1538], [ 0.0891], [-0.2003], [-0.6354], [ 2.5714], [ 1.4727], [ 0.1955], [-0.1545], [-0.5666]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g4-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8142], [-1.0586], [ 0.6165], [-0.6057], [ 4.7728], [-0.1166], [ 1.0948], [-0.0617], [-0.6601]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g4-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.1415], [ 1.1511], [ 1.8738], [-1.0308], [-0.0978], [ 0.3157], [ 0.4837], [ 1.1207], [ 5.5642]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g5-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2673], [-0.9959], [ 0.8056], [-0.3054], [ 2.8140], [-0.6102], [ 0.5465], [ 0.5067], [ 4.2311], [-0.5584]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8379], [ 0.9187], [-0.6382], [-1.5008], [ 2.9517], [ 1.9404], [ 0.8113], [ 3.5415], [-0.2680], [-5.1692]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.3851], [-0.2083], [-0.0678], [-3.2051], [ 1.6549], [ 3.8674]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.4206], [ 3.1417], [ 4.1865], [ 3.2696], [-1.9182], [ 0.4619]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3106], [-1.1343], [-0.6437], [-1.1539], [-1.1557], [-1.9793], [-0.2602], [ 1.9469], [-6.1781], [ 2.3204]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[1-False-False-left-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3389], [ 0.2032], [-0.3421], [ 0.4110], [-0.2262], [-0.1069], [-0.1590], [ 0.5783], [-0.7875], [-0.6027]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g0-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.3510, -1.2033], [ 2.0941, -0.3792]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g0-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1537, 0.8681], [-0.1864, -1.1034]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g1-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.0504, 0.7085], [-3.2649, -0.9280], [-0.5854, -0.2258], [ 1.9170, -1.3022], ...4, 2.0313], [ 0.3720, 1.3165], [ 1.1159, -1.0212], [ 0.1067, 1.9031]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g1-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.5379, -0.0469], [ 0.1300, 0.8569], [-1.8163, 2.3030], [ 0.0631, 2.5281], ...1, 1.2044], [-0.6027, 0.1854], [-0.9662, 2.3994], [ 0.7636, 0.0089]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g2-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-9.3544e-02, 9.8692e-01], [-3.5051e-01, -2.9432e-01], [-1.4872e+00, -9.7779e-01], [ ....4125e-01, -4.0228e-01], [ 5.5287e-04, 2.0340e+00], [ 2.2533e+00, 1.2363e+00]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g2-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2253, 1.5063], [ 0.0752, -0.6795], [-0.3140, -0.8215], [-0.6732, 2.0183], ...6, 0.7034], [ 1.1371, -0.2608], [ 0.6084, -0.6646], [-0.0247, 0.2019]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g3-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4706, -1.1936], [ 0.4129, -0.5083], [-1.7008, -1.0320], [-0.6615, -0.7489], ...0, 0.0583], [-0.0284, -0.5668], [-1.1110, -1.4902], [ 0.2536, 2.1908]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g3-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.9247, 0.2337], [-0.7319, -0.4169], [ 0.5557, 0.7236], [ 1.4424, 0.0209], ...3, 0.6530], [ 2.5194, 0.6835], [ 0.5937, 0.6240], [-0.3103, 0.3496]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g4-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2845, -0.1045], [-0.4104, -0.4079], [-0.5282, 0.3112], [-1.4066, 0.4456], ...5, 1.0008], [ 0.0641, -1.2041], [-1.2834, 1.1388], [-1.0793, -0.3818]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g4-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.1577, 0.5563], [-0.0793, 1.5517], [-0.1574, 1.6363], [ 1.2792, 0.2119], ...0, -0.8517], [-0.1564, 0.0084], [ 1.3731, 1.1603], [-2.2466, -0.9019]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g5-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2626, -0.3043], [ 0.3865, -0.4808], [-1.5827, 1.1025], [ 0.0557, -0.7595], ...4, 0.3354], [-1.8639, 1.3434], [ 0.0773, -0.7414], [-1.5164, 1.3808]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g5-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6206, 0.7761], [-1.2280, -1.1665], [-1.7329, 0.7397], [ 0.1520, 2.0149], ...0, 2.0764], [ 1.0090, 0.9422], [-0.3521, -1.4957], [ 0.3476, -1.0777]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g6-idtype0] _________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.9730, -0.2990], [-1.7396, 1.9544], [ 1.4887, -2.3518], [-3.3447, -2.5611], [ 2.9909, 0.8722], [ 0.3814, -0.3130]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g6-idtype1] _________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.8895, 3.4297], [-1.4949, 1.9177], [ 0.6202, -0.8719], [ 0.8381, -1.1781], [-1.3992, 2.0032], [-1.2639, 1.7271]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g7-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.1506, -0.3841], [-0.8161, -0.0083], [ 1.3171, 1.5105], [ 1.3239, -0.7502], ...7, -1.3016], [-0.7492, -0.0030], [-0.1333, 1.1590], [-0.0298, -0.0459]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-none-g7-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.6887, 0.1179], [-0.6401, 0.0622], [ 1.3569, -1.1112], [ 0.6340, -0.8786], ...5, 0.0714], [ 0.5468, 0.5908], [ 0.7396, 0.6066], [-1.4022, 1.6542]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-both-g0-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1977, -0.1035], [-1.6342, 0.0061]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
________________________________________________________________________________

[The next 26 parametrizations of test_graph_conv fail with the identical
traceback (tests\pytorch\test_nn.py:97 -> graphconv.py:423 -> heterograph.py:4895
-> core.py:357 -> core.py:332 -> spmm.py:189 -> spmm.py:77) and the identical
error at python\dgl\backend\pytorch\sparse.py:720:
TypeError: empty_context() got an unexpected keyword argument 'enabled'.
Only the graph fixture (g0-g7: bipartite graphs, homographs, and a Block) and
the randomly initialized lhs_data tensors differ between cases; the repeated
tracebacks are condensed to their test ids below.]

FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g0-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g1-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g1-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g2-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g2-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g3-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g3-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g4-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g4-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g5-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g5-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g6-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g6-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g7-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-both-g7-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-right-g0-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-right-g0-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-right-g1-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-right-g1-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-right-g2-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-right-g2-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-right-g3-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-right-g3-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-right-g4-idtype0]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-right-g4-idtype1]
FAILED tests\pytorch\test_nn.py::test_graph_conv[2-True-True-right-g5-idtype0]
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-right-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3207, 0.4255], [-2.2391, 0.3768], [-1.3602, 0.7954], [ 2.5255, -2.3910], ...7, 0.0143], [ 0.6623, 0.4566], [ 1.0536, -1.0605], [-0.1507, -2.1614]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-right-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2132, 3.1866], [ 1.0515, -0.3637], [ 0.7496, 1.1329], [-0.9516, 1.1370], [ 1.2935, -1.1386], [ 2.1579, 2.3583]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-right-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1593, -1.7061], [ 1.5382, 0.0033], [-0.4101, -1.1806], [ 0.5399, 0.8544], [-0.4154, 0.0623], [-0.1891, -0.3187]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-right-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.4628, 0.3408], [ 0.2185, -1.1777], [ 1.4324, 0.6008], [-0.5232, 0.6096], ...6, -0.9130], [ 0.8207, -0.8987], [-0.3755, 1.1943], [ 1.1619, -0.2422]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-right-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3122, -0.0980], [-0.1539, -0.3802], [ 0.0056, 1.9926], [ 0.1946, -0.3754], ...9, -0.8729], [-0.5135, -0.2962], [ 0.4841, -1.2970], [ 0.3075, 0.0866]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g0-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2218, -0.2219], [-0.0734, 0.1307]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g0-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0209, 0.3161], [-0.0612, 0.1476]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g1-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.9545, -0.3654], [ 0.4692, -0.5705], [ 0.2420, 0.0956], [ 0.5942, -0.1102], ...8, 0.4365], [ 0.1804, -1.3885], [ 1.6430, 0.2337], [ 0.5259, -0.1823]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g1-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1867, -0.0226], [-0.5136, -0.3010], [ 0.0695, -0.1557], [ 0.1235, 0.3107], ...6, -0.0823], [-0.0684, 1.1479], [ 0.7049, -0.3347], [-0.1789, -0.2288]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g2-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1032, 0.0531], [ 0.0278, -0.0175], [-0.0903, 0.1643], [-0.0448, 0.2767], ...0, 0.0927], [-0.5068, 1.3612], [-1.2713, 2.4504], [-0.8311, 1.1035]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g2-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3112, -0.2099], [-0.1574, -0.0667], [ 0.2777, 0.2505], [ 0.0402, -0.1744], ...4, -0.3779], [-1.1050, 0.8716], [-0.2265, -0.9129], [-1.7188, 0.1669]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g3-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3624, -0.8149], [ 0.7222, -1.2998], [ 0.2078, 1.1462], [ 1.0498, 0.5678], ...4, -0.0485], [ 1.0299, 1.5393], [-1.0896, -0.0586], [-0.7867, 1.9295]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g3-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.0263, -0.0325], [ 0.6689, 0.6167], [-0.1908, 0.0759], [ 0.2869, -0.2454], ...1, -0.3056], [-0.0752, 0.8032], [-1.5357, -0.7494], [ 2.2296, -1.1831]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g4-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.7238, -0.1302], [ 0.2632, -0.7747], [ 0.7217, -0.4355], [ 1.3497, -0.1121], ...7, -0.1635], [ 2.6146, 0.9852], [ 0.4407, 0.7338], [-0.7386, -0.6700]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g4-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4028, 0.3682], [-0.1282, 0.5773], [-0.5238, -0.3647], [-0.3389, -0.5345], ...3, -0.3821], [ 1.0693, -0.5560], [ 0.6188, -0.1521], [ 1.0323, -1.3502]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g5-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.9231, -0.5061], [ 0.0911, 0.4708], [ 0.5256, -0.3522], [ 0.4151, 0.2415], ...3, -0.1314], [-1.8468, -2.0167], [ 0.4128, -0.5724], [-3.1471, -2.3906]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g5-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1435, -0.0499], [ 0.4408, 0.2236], [-0.2207, -0.0237], [ 0.3001, -0.1068], ...5, 0.6482], [ 2.5116, -1.0981], [ 2.9757, -0.8697], [-1.4709, 0.9189]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g6-idtype0] _________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0788, -1.0705], [ 0.5486, 2.7754], [-0.8486, 1.0115], [ 0.1418, 1.5106], [-0.4816, 0.1798], [-0.3411, -1.4440]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g6-idtype1] _________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.3099, -0.8310], [-1.1597, -0.0358], [ 0.1113, -0.0022], [-1.7612, -1.3956], [ 1.5562, 0.3306], [ 0.6760, 0.0045]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g7-idtype0] _________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.9007, 0.1032], [-0.4656, -0.3208], [-0.0553, -0.1825], [ 0.3899, 0.0471], ...5, 0.1033], [ 0.8115, 0.2386], [ 0.2441, 0.5742], [-0.8917, 0.3200]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-True-left-g7-idtype1] _________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0221, 0.1607], [ 0.5454, -0.5471], [ 0.6494, 0.6237], [ 0.2013, -0.5390], ...6, 0.4816], [ 1.0953, 1.5182], [ 1.0593, 1.7145], [-1.3243, -0.3539]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-False-none-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-3.1038, 1.4069], [ 0.3139, -1.4302]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
[The 26 failures below are identical to the one above: each raises TypeError: empty_context() got an unexpected keyword argument 'enabled' at python\dgl\backend\pytorch\sparse.py:720 from the same conv(g, h, weight=ext_w) call at tests\pytorch\test_nn.py:99, and they differ only in the graph fixture, norm, index dtype, and the random input tensors. Failure headers:]

________________ test_graph_conv[2-True-False-none-g0-idtype1] ________________
________________ test_graph_conv[2-True-False-none-g1-idtype0] ________________
________________ test_graph_conv[2-True-False-none-g1-idtype1] ________________
________________ test_graph_conv[2-True-False-none-g2-idtype0] ________________
________________ test_graph_conv[2-True-False-none-g2-idtype1] ________________
________________ test_graph_conv[2-True-False-none-g3-idtype0] ________________
________________ test_graph_conv[2-True-False-none-g3-idtype1] ________________
________________ test_graph_conv[2-True-False-none-g4-idtype0] ________________
________________ test_graph_conv[2-True-False-none-g4-idtype1] ________________
________________ test_graph_conv[2-True-False-none-g5-idtype0] ________________
________________ test_graph_conv[2-True-False-none-g5-idtype1] ________________
________________ test_graph_conv[2-True-False-none-g6-idtype0] ________________
________________ test_graph_conv[2-True-False-none-g6-idtype1] ________________
________________ test_graph_conv[2-True-False-none-g7-idtype0] ________________
________________ test_graph_conv[2-True-False-none-g7-idtype1] ________________
________________ test_graph_conv[2-True-False-both-g0-idtype0] ________________
________________ test_graph_conv[2-True-False-both-g0-idtype1] ________________
________________ test_graph_conv[2-True-False-both-g1-idtype0] ________________
________________ test_graph_conv[2-True-False-both-g1-idtype1] ________________
________________ test_graph_conv[2-True-False-both-g2-idtype0] ________________
________________ test_graph_conv[2-True-False-both-g2-idtype1] ________________
________________ test_graph_conv[2-True-False-both-g3-idtype0] ________________
________________ test_graph_conv[2-True-False-both-g3-idtype1] ________________
________________ test_graph_conv[2-True-False-both-g4-idtype0] ________________
________________ test_graph_conv[2-True-False-both-g4-idtype1] ________________
________________ test_graph_conv[2-True-False-both-g5-idtype0] ________________
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-False-both-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.4740, 0.8370], [-1.2922, 1.5800], [-0.0455, -0.3925], [-0.3020, -1.4478], ...4211], [-0.6816, -0.7538], [ 0.4064, -0.8560], [ 3.1878, -1.5870], [-0.9618, 1.5481]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-False-both-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.1147, -3.3883], [-0.1674, -1.8532], [ 0.9396, 0.4774], [ 0.4868, -0.1447], [ 0.1077, -1.8286], [ 1.7686, -1.8581]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-False-both-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2536, 0.6840], [-4.7416, -1.9338], [ 5.1740, 3.0828], [ 4.2719, -2.9212], [ 3.3435, 3.2829], [ 5.4536, 0.3366]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-False-both-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6112, -1.0322], [-0.9660, -1.2656], [-0.5568, -0.0170], [-1.0585, 0.0364], ...6750], [ 1.0281, 1.2264], [-1.2922, -0.1678], [-0.7419, 3.4018], [ 0.9181, -2.0621]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-False-both-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.8728, 2.0561], [-2.8980, -0.9221], [-1.6157, -1.1708], [ 0.1209, 0.1299], ...8515], [ 2.2443, 3.1604], [ 2.2964, 0.3459], [-1.6326, -0.9366], [ 1.8599, -0.2178]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.6409, -0.4236], [ 2.0835, -1.2922]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g0-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.6761, -3.1534], [-0.6356, -2.1361]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g1-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5273, -0.4669], [ 2.4999, -0.5867], [ 1.9906, 0.0103], [-2.2557, 1.9009], ...7437], [ 0.1476, 1.0344], [-0.3726, 0.2260], [ 2.0140, -1.5441], [ 6.2371, -1.6143]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-4.2971, -2.6336], [-0.7071, -1.9350], [ 0.5419, 0.4215], [-1.0123, -0.1804], ...1184], [-1.6244, 2.1048], [ 1.8255, 1.1557], [ 0.2511, -1.4123], [-0.0599, 0.1206]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.6189, 0.7960], [-2.8833, -0.0803], [ 1.5625, -2.1088], [-4.8389, -0.5395], ...4260], [-0.8576, 0.3372], [-2.9468, 1.1621], [ 0.9221, -2.5095], [-2.6276, 0.8042]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.9061, -1.3634], [ 1.9882, -1.5510], [ 0.9047, -1.5794], [-3.6180, 0.5518], ...8422], [ 0.7737, -1.0510], [-0.2739, -2.1356], [ 1.9877, -1.3571], [ 1.5729, 0.3260]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.6694, -2.2023], [-1.0809, -0.3481], [ 1.8799, 1.6363], [ 0.4183, 2.9646], ...6454], [ 1.2242, -0.1957], [-1.3460, 0.4409], [ 1.7706, 6.4919], [ 0.9262, 3.8824]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 8.3340e-01, -7.0120e-01], [ 2.2028e-01, 2.3017e+00], [ 2.2716e-01, -6.9125e-01], [-...0870e+00], [-1.1036e+00, 5.4663e-01], [ 5.8810e-01, -5.8195e-02], [-1.7244e+00, -2.2836e+00]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g4-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.4139, 0.5883], [-0.0606, 2.2650], [ 0.6067, -1.3302], [ 0.5640, 0.7494], ...1316], [-0.5894, -0.1608], [ 0.9895, 0.8452], [ 1.4778, -0.3236], [ 0.5015, -0.9081]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g4-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.4592, 1.8212], [ 0.8609, 1.1843], [ 0.4434, 1.9594], [ 3.4160, -1.6861], ...4611], [-2.6194, -3.4491], [ 3.4010, -3.1073], [-2.5688, -4.4059], [ 2.8482, -2.4701]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g5-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ -0.3845, -3.5402], [ 0.6934, -10.1749], [ -0.2345, -1.1569], [ -0.1608, -3.2896]... [ 3.1565, -1.2959], [ 1.6170, 5.8598], [ -2.6985, -1.2090], [ -0.1040, 3.3796]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.6372, -2.1168], [ 0.9317, -2.3713], [ 1.3497, -4.0589], [-0.3685, 2.1150], ...1984], [-0.2971, -0.8895], [-2.4547, 0.9126], [-0.0194, 0.3901], [ 0.7676, -0.5656]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8322, 1.1402], [ 0.7285, -1.7066], [ 0.0521, -1.4431], [-1.8780, 0.5513], [ 1.4213, -1.9293], [ 1.2500, -1.7972]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.6300e+00, -1.6471e+00], [-2.6725e+00, -4.6986e-01], [ 1.6736e+00, 2.4942e-03], [-1.3288e+00, -1.6191e+00], [-2.5534e+00, -1.1186e+00], [ 2.8410e+00, -2.0453e-02]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6823, -2.2873], [ 4.3929, -5.4634], [ 2.8950, -2.5847], [ 1.4286, -0.2859], ...3409], [ 0.8874, 0.4650], [ 1.4367, -0.9764], [-1.0290, 0.4248], [-0.7761, -0.9615]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-True-False-right-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1978, 0.4316], [-3.5302, 2.0848], [ 0.7262, 1.2848], [-1.7018, -3.2592], ...5650], [ 1.4861, -1.4744], [ 2.4296, 2.7572], [ 2.0522, 5.6106], [-1.5731, -6.0252]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-False-left-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'left', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1612, -1.2319], [ 0.0279, -0.1106]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-True-False-left-g0-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'left', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3059, 0.6040], [ 1.1540, -0.1718]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
[26 further test_graph_conv failures elided: each raises the identical TypeError at
python\dgl\backend\pytorch\sparse.py:720 shown above, differing only in the
parametrized test id and the random tensor values. Elided ids:
test_graph_conv[2-True-False-left-g{0..7}-idtype{0,1}] (the 15 cases after
g0-idtype0) and test_graph_conv[2-False-True-none-g{0..5}-idtype{0,1}] (the 11
cases before g5-idtype1, which follows).]
________________ test_graph_conv[2-False-True-none-g5-idtype1] ________________

idtype = torch.int64
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={})
norm = 'none', weight = True, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
>           h_out = conv(g, h)

tests\pytorch\test_nn.py:97:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = , op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.2082,  3.1299], [-0.8607, -0.1224], [ 1.1199, -1.7766],
                   [-0.8695,  1.0439], ...7, -1.9231], [ 0.5083,  2.8360],
                   [-0.1257, -1.1029], [-0.5343, -0.7671]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-none-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5500, 1.8999], [ 0.2105, 2.2328], [ 0.6470, 0.5004], [ 0.0872, -0.0024], [ 0.4570, 2.3549], [-0.4528, -0.6453]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-none-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3161, -2.7572], [-0.2388, -1.6739], [-1.0710, 1.2060], [ 0.1586, -0.4608], [ 0.1750, 1.5626], [-0.7186, -0.9135]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-none-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.0305, 1.3547], [-3.4451, 1.8640], [ 3.0705, -1.0544], [ 1.4712, -0.0076], ...8, -0.0178], [ 3.2313, -0.3028], [ 1.8590, 0.4222], [ 1.7356, -0.2823]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-none-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6157, 2.5450], [-0.3667, -1.1611], [ 1.2605, 0.8783], [-0.7846, -0.4191], ...8, -0.0520], [-0.6632, -1.0145], [-0.6582, -0.2944], [-0.7664, -0.0028]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.0389, 0.6278], [-0.2035, 0.3963]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g0-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2074, -0.2167], [ 0.4653, 0.1210]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g1-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4648, 0.5279], [ 0.5429, 0.0861], [ 0.6025, -1.0866], [-0.8314, 0.7938], ...4, -0.5960], [ 0.5076, 0.1413], [ 0.6201, -0.0464], [ 0.0058, -1.0342]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0051, 1.1937], [ 0.6452, 2.1683], [ 0.2150, -0.9020], [-0.6454, -1.3557], ...3, 2.0202], [ 0.5537, -1.8213], [ 1.3704, 1.6704], [ 0.7406, 2.0072]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1193, -0.5407], [ 0.3473, -0.7138], [ 1.2882, -0.7455], [-0.4869, -0.2624], ...5, -0.4558], [ 2.2607, -1.5854], [ 1.8564, -0.5718], [-0.4792, 1.1571]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3290, -1.0120], [ 1.1802, -0.4449], [ 1.2090, 0.3047], [ 0.6122, 1.0983], ...6, 0.5165], [ 1.2386, -1.9762], [ 0.6690, -0.2755], [-0.6709, -0.1255]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1603, 0.7189], [ 0.5542, 1.9043], [ 0.0085, 0.2670], [-1.8678, -0.9349], ...6, -1.8488], [-1.1848, 0.3490], [ 0.3292, -1.9966], [ 0.2795, 0.0484]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 4.2825e-01, 4.3939e-01], [ 1.3273e-03, -6.4144e-01], [ 6.6103e-05, -6.5769e-02], [-....9850e-01, 7.4838e-01], [-2.6344e-01, 6.1650e-02], [-5.4277e-01, -9.2936e-01]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g4-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6820, -0.4490], [-0.6534, 0.4218], [ 0.0755, -0.7775], [-1.9106, 0.0538], ...2, -0.3121], [-0.2377, -0.1086], [ 1.1542, -0.0073], [-2.8631, 0.0444]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g4-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1875, -0.4811], [ 0.0093, -0.4355], [-0.1616, 0.5608], [-0.1373, -2.3742], ...9, 0.4740], [ 0.8071, 0.3131], [ 0.5745, 1.2973], [-0.0358, 0.7617]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g5-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1346, -0.0168], [-0.4085, 0.1094], [-0.4616, -0.0596], [ 0.4259, -0.4920], ...4, 0.0488], [ 0.3606, 0.7240], [-0.3028, -0.1124], [-0.3392, -0.4966]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8723, -0.1084], [ 0.3939, 1.5386], [ 0.7282, -0.4870], [-0.2768, -0.6656], ...9, -0.6047], [-0.2017, 1.0673], [ 0.1335, -0.2963], [ 0.3581, -0.2529]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4348, -0.4335], [-0.5490, 1.0289], [-1.0261, 0.9606], [ 1.2647, -1.1168], [ 0.6674, 0.8540], [ 2.1294, -1.5822]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.1681, 1.7924], [ 0.3594, 0.1926], [-0.9354, 1.6331], [ 1.0434, -0.1649], [-0.0536, -0.3749], [ 1.6989, 0.3665]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8653, -0.5314], [-0.2706, 0.2322], [-1.1174, -0.1664], [-0.4866, 0.5918], ...1, -0.3811], [ 1.8798, 1.7384], [ 0.3685, -1.4455], [-0.3096, 1.5833]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-both-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8377, -0.1740], [-0.7186, 0.3387], [ 0.3076, 0.4532], [-0.0313, 0.3506], ...1, -0.5528], [ 0.1399, 1.0331], [ 1.3664, 2.0834], [ 0.0207, 0.7577]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-True-right-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.4240, -1.1366], [-1.5470, 0.6913]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The 26 test_graph_conv parametrizations below fail at the same point,
python\dgl\backend\pytorch\sparse.py:720, with the identical
TypeError: empty_context() got an unexpected keyword argument 'enabled'.
Their tracebacks match the one above except for the graph fixture and the
random tensor values, so only the failure headers are listed (the bracketed
IDs read [out_dim-bias-weight-norm-g-idtype]):

_______________ test_graph_conv[2-False-True-right-g0-idtype1] ________________
_______________ test_graph_conv[2-False-True-right-g1-idtype0] ________________
_______________ test_graph_conv[2-False-True-right-g1-idtype1] ________________
_______________ test_graph_conv[2-False-True-right-g2-idtype0] ________________
_______________ test_graph_conv[2-False-True-right-g2-idtype1] ________________
_______________ test_graph_conv[2-False-True-right-g3-idtype0] ________________
_______________ test_graph_conv[2-False-True-right-g3-idtype1] ________________
_______________ test_graph_conv[2-False-True-right-g4-idtype0] ________________
_______________ test_graph_conv[2-False-True-right-g4-idtype1] ________________
_______________ test_graph_conv[2-False-True-right-g5-idtype0] ________________
_______________ test_graph_conv[2-False-True-right-g5-idtype1] ________________
_______________ test_graph_conv[2-False-True-right-g6-idtype0] ________________
_______________ test_graph_conv[2-False-True-right-g6-idtype1] ________________
_______________ test_graph_conv[2-False-True-right-g7-idtype0] ________________
_______________ test_graph_conv[2-False-True-right-g7-idtype1] ________________
_______________ test_graph_conv[2-False-True-left-g0-idtype0] ________________
_______________ test_graph_conv[2-False-True-left-g0-idtype1] ________________
_______________ test_graph_conv[2-False-True-left-g1-idtype0] ________________
_______________ test_graph_conv[2-False-True-left-g1-idtype1] ________________
_______________ test_graph_conv[2-False-True-left-g2-idtype0] ________________
_______________ test_graph_conv[2-False-True-left-g2-idtype1] ________________
_______________ test_graph_conv[2-False-True-left-g3-idtype0] ________________
_______________ test_graph_conv[2-False-True-left-g3-idtype1] ________________
_______________ test_graph_conv[2-False-True-left-g4-idtype0] ________________
_______________ test_graph_conv[2-False-True-left-g4-idtype1] ________________
_______________ test_graph_conv[2-False-True-left-g5-idtype0] ________________
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-left-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0972, 0.1028], [ 0.0324, 0.8732], [ 0.0306, 0.0273], [ 0.0989, -0.0242], ...2, 0.8732], [-1.9474, -1.4611], [-0.8505, 2.6139], [-0.3851, 1.4261]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-left-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'left', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6894, 0.2529], [-0.0468, -0.0838], [-2.9570, 1.2054], [-1.8181, 0.8335], [-1.1607, -0.9860], [-1.2959, -1.5112]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-left-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'left', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1842, -0.5037], [-0.1779, 0.3079], [ 0.3610, -0.2555], [-1.3150, -0.2715], [ 0.8261, 1.6040], [ 1.5260, 0.3522]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-left-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0089, 0.1631], [-1.1527, 0.2998], [ 0.0789, 0.2814], [-1.1719, -0.0186], ...8, -0.2912], [ 0.4564, -1.2632], [-0.7116, 0.2205], [ 0.7336, -0.1895]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_graph_conv[2-False-True-left-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: > h_out = conv(g, h) tests\pytorch\test_nn.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0175, 0.4816], [-1.3499, 1.0579], [ 0.0284, 0.2270], [-0.1736, 0.8167], ...3, -0.4947], [ 1.8826, -1.3539], [ 0.9728, 0.0144], [ 0.0260, 0.5413]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[2.5473, 0.1615], [3.5480, 2.2929]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g0-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8419, -1.6728], [-4.8046, -1.7068]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g1-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7178, 1.6328], [-1.7075, 2.9108], [ 0.4732, 0.9238], [-0.8957, -0.9904], ...2658], [ 2.4376, -2.7762], [ 2.7264, 3.0805], [ 1.0320, 0.3583], [-1.6805, 1.8533]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.4063, -0.2400], [-0.4425, -0.3523], [ 2.0322, 0.6887], [-0.8212, -1.9006], ...5500], [ 1.2800, 0.3213], [ 3.4586, 0.0717], [ 6.3578, -0.8477], [-0.6974, 0.3242]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7053, 0.9560], [-1.3994, -1.0515], [-1.6046, 0.5246], [ 1.1110, -1.2590], ...8153], [-0.6104, 0.2701], [ 1.6053, -0.2188], [-2.3104, -1.1331], [-3.5968, -1.1184]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8718, 2.2248], [-0.8780, 1.3736], [-0.9603, -0.6239], [-2.4316, 0.8264], ...5128], [ 2.7869, 0.5676], [ 1.0615, 1.0202], [-0.4034, -0.6049], [-0.3782, -2.4210]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7611, -5.2887], [ 0.5501, -0.7775], [-1.1504, -3.9606], [-0.4306, 0.3241], ...9489], [ 0.1746, -0.0360], [-0.9172, -1.8499], [ 0.1969, -1.1318], [-0.3348, -0.6924]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-3.6499, 3.3221], [-2.4210, 3.2537], [ 0.9588, -0.8004], [ 1.4358, -2.7597], ...4114], [-3.1661, 5.0269], [ 0.5740, 3.1312], [-2.4129, 2.4621], [-2.3120, -2.6002]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g4-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7448, 2.1082], [ 0.0417, 0.1825], [ 0.9927, -0.9574], [-2.1265, 2.3450], ...0187], [ 1.6276, -0.8070], [-0.8368, -0.1574], [ 0.6187, -0.7437], [-0.1563, 0.8012]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g4-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.0844, 0.3951], [ 0.0107, 3.4678], [-1.9365, 5.7447], [-2.1017, -0.2551], ...7859], [ 2.7314, -2.6363], [-1.3374, 0.1929], [-0.1072, 1.3035], [ 0.1151, 2.3880]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g5-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.1341, 0.1142], [ 0.4631, 2.6083], [-1.9624, 1.4488], [ 0.0698, -0.0227], ...3957], [ 2.3019, -1.4060], [-0.0903, 2.1176], [ 0.0330, 0.5572], [ 1.4720, -4.0337]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4857, 1.8935], [-1.9614, -2.7994], [ 4.5082, 0.9465], [-6.4295, 2.9452], ...4784], [ 1.3294, 2.8282], [ 1.0420, 0.9507], [ 5.8315, 0.7371], [-4.4010, -1.4021]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-4.1252, 0.0298], [-6.8054, 0.4658], [ 3.5103, -0.5221], [-1.4110, 1.3537], [ 3.2295, -1.4304], [ 4.4501, -0.3283]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6149, 2.7836], [ 0.1117, -0.2028], [-2.4095, 0.7754], [-0.3239, 1.3242], [ 0.0970, -0.3163], [ 0.2447, -1.0941]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2585, 0.2860], [ 0.7690, 0.4950], [-0.2350, 1.2129], [-0.2653, -0.2115], ...7883], [-1.3150, 0.5654], [-1.2211, -3.8940], [-0.0259, 0.0337], [ 2.1553, 3.5246]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-none-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'none', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.5139, 0.1272], [ 2.9486, -0.6920], [-2.5253, -3.9679], [-1.3013, -1.8208], ...2864], [ 1.2000, -1.3302], [ 4.3518, -0.3953], [ 3.2044, 2.2777], [-0.6825, 0.4838]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-both-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3533, -0.4085], [ 0.0612, -0.3107]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-both-g0-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.0986, -0.4261], [ 1.4830, 0.7016]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
_______________ test_graph_conv[2-False-False-both-g1-idtype0] ________________

idtype = torch.int32
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.5055, -0.1786],
        [-0.8816, 0.5487],
        [ 0.5423, -0.0873],
        [-0.2318, -1.0297],
        ...3018],
        [-4.4126, 0.8305],
        [-2.1958, 0.8466],
        [ 0.0978, 1.4832],
        [ 1.3188, -1.5229]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g1-idtype1] ________________

idtype = torch.int64
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.7161, 0.9719],
        [-3.0068, 1.1356],
        [ 0.1645, 0.1861],
        [-1.2578, 0.8632],
        ...9950],
        [ 1.4026, 0.3198],
        [-3.6763, -0.2235],
        [-2.3301, -1.1614],
        [-0.9197, -1.1399]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g2-idtype0] ________________

idtype = torch.int32
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.8245, 0.5419],
        [-1.0245, 2.5764],
        [-0.2129, 1.1025],
        [ 1.3001, 0.1362],
        ...6042],
        [ 0.7959, 1.6844],
        [-2.7224, 2.8578],
        [ 3.1837, 1.1586],
        [ 1.0437, -0.3545]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g2-idtype1] ________________

idtype = torch.int64
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.0814, -0.1032],
        [ 0.0300, -0.1650],
        [ 0.4687, -0.4336],
        [ 0.3893, 0.1429],
        ...8348],
        [-0.4999, 0.3089],
        [-0.5779, 0.9147],
        [-2.0823, 0.7812],
        [ 0.1672, -0.6784]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g3-idtype0] ________________

idtype = torch.int32
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.1059, 0.8153],
        [ 0.2476, -1.0040],
        [ 0.4853, -0.5580],
        [-0.5773, -0.3269],
        ...6296],
        [-2.1075, -0.0179],
        [-3.4991, -2.4528],
        [ 0.4553, 0.3813],
        [-2.4641, -0.1436]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g3-idtype1] ________________

idtype = torch.int64
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.2843, 0.2721],
        [-0.2377, -0.4289],
        [-0.1065, 0.1037],
        [-0.8728, -0.2120],
        ...6507],
        [ 0.0280, 1.0468],
        [-1.4972, 0.8241],
        [-0.4449, -0.0504],
        [-0.1437, -1.1017]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g4-idtype0] ________________

idtype = torch.int32
g = Graph(num_nodes=9, num_edges=15,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.8701, -1.6601],
        [ 3.0653, 2.0465],
        [-1.1553, -0.2288],
        [-4.5052, -2.9616],
        ...5885],
        [-0.3941, 1.8723],
        [-0.0490, 0.8324],
        [ 1.4819, 0.4298],
        [-2.3221, -0.5813]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g4-idtype1] ________________

idtype = torch.int64
g = Graph(num_nodes=9, num_edges=15,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.7461, -0.6506],
        [ 1.8525, -0.0516],
        [-0.5321, 1.0190],
        [ 0.3271, -1.4910],
        ...4728],
        [-1.5314, -0.1121],
        [-1.8178, 0.1759],
        [-0.4576, -0.4630],
        [ 1.5889, 1.0866]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g5-idtype0] ________________

idtype = torch.int32
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.0214, -1.1046],
        [-0.5760, -0.0508],
        [-1.8635, -0.4690],
        [-0.5241, 2.0931],
        ...5362],
        [ 1.8254, 1.0294],
        [-1.1921, -3.8580],
        [-1.3183, 1.8634],
        [-1.4641, -0.2206]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g5-idtype1] ________________

idtype = torch.int64
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.8642, 0.0354],
        [ 0.5945, 0.5043],
        [ 0.4947, -0.0327],
        [ 2.1413, -0.5389],
        ...0137],
        [ 0.4641, 0.5711],
        [-1.0487, 0.7903],
        [ 2.2448, -0.4297],
        [-1.1508, 1.3551]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g6-idtype0] ________________

idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.7792, -0.9386],
        [ 1.2711, -1.5208],
        [ 0.1815, 1.5126],
        [-0.4756, 3.2999],
        [-0.1464, 1.6148],
        [ 0.1345, -1.7093]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g6-idtype1] ________________

idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.1758, 0.4340],
        [-0.8091, 0.0600],
        [-2.2226, 2.3948],
        [ 1.2565, 0.2738],
        [-4.3683, 0.8923],
        [ 0.0251, -0.3826]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g7-idtype0] ________________

idtype = torch.int32
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.7341, -0.5517],
        [ 3.9555, 1.2566],
        [ 1.2910, 0.5469],
        [-2.6206, -0.5164],
        ...6637],
        [-2.3861, 0.7074],
        [-3.4838, 1.4221],
        [-4.6070, -0.0627],
        [ 0.0761, 2.4854]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-both-g7-idtype1] ________________

idtype = torch.int64
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'both', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-3.3476, 0.8941],
        [ 0.4025, 2.4219],
        [-2.3476, 0.7798],
        [ 1.0799, 2.2725],
        ...6172],
        [-0.1561, -1.3979],
        [ 0.3715, -1.5823],
        [-2.2653, -0.0685],
        [ 0.7197, -2.4360]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
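That closes out the norm='both' group; the norm='right' parametrizations below fail the same way. One common shape for keeping such a CPU/older-PyTorch fallback call-compatible with `torch.cuda.amp.autocast` is to accept and ignore the same keywords; this is only an illustrative sketch, not DGL's actual patch:

    import contextlib

    try:
        from torch.cuda.amp import autocast  # real autocast when available
    except ImportError:
        @contextlib.contextmanager
        def autocast(enabled=True, **kwargs):
            # No-op fallback that still tolerates autocast(enabled=False)
            yield

With a fallback like this, `with autocast(enabled=False):` is harmless on both code paths, which is exactly the call site the tracebacks keep hitting.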
_______________ test_graph_conv[2-False-False-right-g0-idtype0] _______________

idtype = torch.int32
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'right', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.6458, -0.4761],
        [-0.2568, 3.9496]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-right-g0-idtype1] _______________

idtype = torch.int64
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'right', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.6614, 2.3261],
        [-1.1673, -3.7749]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-right-g1-idtype0] _______________

idtype = torch.int32
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
norm = 'right', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.2938, 1.4717],
        [ 1.2816, 0.1879],
        [ 2.1415, 1.3154],
        [ 2.3745, 1.1183],
        ...7598],
        [-3.6644, -2.6267],
        [-2.3141, -0.7239],
        [-2.0334, -0.2522],
        [-0.4541, 0.5085]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-right-g1-idtype1] _______________

idtype = torch.int64
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
norm = 'right', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.0998, 0.1522],
        [ 2.5539, 0.5601],
        [-2.6954, -0.3529],
        [-1.1412, -0.0461],
        ...6624],
        [ 0.5106, -0.6169],
        [-0.2095, -0.0815],
        [-1.3676, -0.2454],
        [ 1.3715, 0.6018]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-right-g2-idtype0] _______________

idtype = torch.int32
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'right', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.1656, 2.8663],
        [ 0.1766, -1.8682],
        [-1.1478, -0.4058],
        [-0.7478, -0.5165],
        ...4722],
        [ 0.8284, 0.2661],
        [ 2.9640, 1.0675],
        [ 0.7402, -0.0840],
        [ 0.4492, -0.4228]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-right-g2-idtype1] _______________

idtype = torch.int64
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'right', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-2.8395, 0.0984],
        [ 2.2624, 1.6085],
        [-0.4491, 3.3438],
        [-1.4750, 3.2735],
        ...1617],
        [-2.8793, 0.5710],
        [-4.0139, -2.5488],
        [-3.3310, -2.0883],
        [-2.4604, -0.0887]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-right-g3-idtype0] _______________

idtype = torch.int32
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
norm = 'right', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.2971, -0.8830],
        [-1.9560, 2.3729],
        [ 0.8491, 3.3576],
        [ 0.8262, 0.9050],
        ...2257],
        [ 7.8312, -1.5554],
        [-3.7430, 1.9351],
        [ 1.8630, 1.7098],
        [ 2.6070, -1.3754]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-right-g3-idtype1] _______________

idtype = torch.int64
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
norm = 'right', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.4388, 0.2759],
        [ 1.4506, -2.7567],
        [ 0.9826, -3.2315],
        [-3.1449, 3.2906],
        ...6432],
        [-0.0799, -0.5410],
        [-0.0877, -0.5883],
        [-0.9023, 0.9301],
        [ 0.5053, -0.4283]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-right-g4-idtype0] _______________

idtype = torch.int32
g = Graph(num_nodes=9, num_edges=15,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'right', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.0586, 0.2199],
        [ 0.4763, 1.6019],
        [ 1.9474, 0.0352],
        [-0.5688, -0.1214],
        ...9607],
        [ 0.4134, -0.6467],
        [-2.5958, -0.1549],
        [-1.8618, -1.8620],
        [ 1.1341, 0.1197]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________ test_graph_conv[2-False-False-right-g4-idtype1] _______________

idtype = torch.int64
g = Graph(num_nodes=9, num_edges=15,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={})
norm = 'right', weight = False, bias = False, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
    @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
    @pytest.mark.parametrize('weight', [True, False])
    @pytest.mark.parametrize('bias', [True, False])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_graph_conv(idtype, g, norm, weight, bias, out_dim):
        # Test one tensor input
        g = g.astype(idtype).to(F.ctx())
        conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
        ext_w = F.randn((5, out_dim)).to(F.ctx())
        nsrc = g.number_of_src_nodes()
        ndst = g.number_of_dst_nodes()
        h = F.randn((nsrc, 5)).to(F.ctx())
        if weight:
            h_out = conv(g, h)
        else:
>           h_out = conv(g, h, weight=ext_w)

tests\pytorch\test_nn.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([
        [-0.8282, -4.8018],
        [-0.0452, -0.1485],
        [ 0.9104, 1.2502],
        ...3353],
        [-0.1639, -4.1885],
        [-2.4279, 1.4491],
        [ 0.5827, 2.1371],
        [ 0.2090, 4.7612]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-right-g5-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.6581, -1.8333], [-0.1267, 0.7648], [-3.2151, -0.0341], [-3.5960, -1.9909], ...4440], [-2.4528, -3.1929], [-4.8574, -1.3313], [ 2.0699, 0.4109], [ 1.0389, 1.3800]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-right-g5-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3250, 0.4656], [ 0.3167, -0.8636], [-0.3144, 2.5902], [-0.2223, 1.2967], ...4113], [-1.7959, 3.2753], [-0.3183, -0.4363], [ 1.0781, -3.3787], [ 0.4418, -0.7608]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-right-g6-idtype0] _______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3218, 6.7626], [ 0.8358, 1.3400], [ 1.0052, 7.4336], [-1.7378, 1.1728], [-0.1023, 0.9432], [-0.9876, -1.3587]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-right-g6-idtype1] _______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4109, -0.9929], [-1.0476, -0.0363], [ 0.9144, 1.8973], [ 1.4533, 0.5308], [-0.5698, -0.1699], [-1.0061, 0.6821]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-right-g7-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.7628, 4.4383], [-2.5359, 1.5889], [ 0.8011, -0.6141], [-2.8351, -1.9287], ...4708], [-2.3146, -7.5274], [-2.8181, -4.1121], [ 0.5941, 0.1342], [-0.2040, 5.0609]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-right-g7-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.5384, -0.9701], [ 2.6945, -0.0529], [-2.8188, 0.5542], [-4.3822, -0.9978], ...9662], [ 0.0588, -1.2434], [-0.4687, -1.5735], [-1.5605, 0.1412], [ 2.9055, 2.3679]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g0-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.3850, 0.2387], [ 0.4232, -0.1218]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g0-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.7654, -0.5863], [-0.4024, -0.8502]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g1-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0915, -0.5824], [-0.3154, -0.8315], [-1.3535, 0.7389], [ 0.2907, -0.2688], ...7046], [-1.1573, 1.1132], [ 1.7272, -4.4239], [-0.5765, 3.0220], [-0.7804, -0.5613]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g1-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3865, 0.0834], [-0.5287, -1.0291], [ 1.1623, 0.0241], [ 0.7479, 0.5175], ...0874], [-0.6089, 1.6730], [ 0.4392, 1.4046], [-2.9566, 2.1303], [ 2.5902, -1.5457]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g2-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.9227, -0.6885], [-2.0334, -1.5706], [ 1.1277, 1.7209], [-0.4361, -0.3407], ...6836], [ 1.4991, 1.2031], [-0.9522, -4.6665], [-1.2120, -0.0138], [ 1.6207, -0.5767]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g2-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.0107e-01, -2.4149e-01], [-5.3020e-01, 1.6443e-01], [-5.8602e-01, -2.0977e-03], [ ...4478e-01], [-2.8991e+00, -2.9585e-01], [ 1.6705e+00, 8.7000e-02], [ 7.0896e-01, -5.2461e-01]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g3-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6853, 0.1328], [-1.7697, 0.5352], [-0.4654, -0.2123], [ 0.4295, 0.6319], ...6685], [ 0.4499, -1.5083], [ 1.7482, -0.3459], [ 5.4792, -2.0370], [-3.5047, 1.8903]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g3-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3934, 0.1329], [ 0.7780, -0.0773], [-0.5195, -0.2756], [-0.7194, 0.8215], ...8474], [-1.0128, -1.4158], [-0.6066, 1.1560], [ 2.2824, 1.5496], [ 3.3647, 0.5634]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g4-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.1560, 0.1863], [-0.3192, 0.1991], [-0.4113, -0.0415], [ 0.5920, -0.0412], ...2683], [-0.5136, 0.2055], [-1.1226, -1.9548], [ 0.3002, -0.3637], [-3.4102, 3.2545]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g4-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.3099, -2.4330], [ 0.6095, -0.0643], [-0.1694, 1.5040], [-0.4595, -4.0571], ...8606], [-1.1805, -1.0198], [ 2.5866, 0.7669], [-0.1388, 0.9306], [-2.6197, -2.8633]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g5-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3822, -0.5067], [-0.6998, 1.3621], [ 1.5377, 0.3518], [ 1.2337, -0.0359], ...2095], [-0.9856, 1.8726], [ 1.4337, 2.7913], [-0.8470, 0.9216], [-2.0844, 1.2601]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g5-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-3.7128e-02, -2.2927e-01], [ 2.2885e-01, 3.2371e-01], [ 1.0472e-01, -5.1541e-01], [-...7833e-02], [ 1.0966e+00, -1.0675e+00], [-3.6253e+00, 5.1798e+00], [ 3.3043e+00, 2.0403e+00]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g6-idtype0] ________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4307, -0.3076], [ 2.0112, 2.4887], [-0.1668, -0.6595], [ 0.8482, 0.1906], [ 1.9205, 3.8002], [ 1.1632, -0.2879]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g6-idtype1] ________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.5379, -3.8132], [-1.6588, -1.1660], [ 0.3637, -1.8675], [-0.6216, -0.0696], [ 3.9410, 1.7853], [-0.2589, -0.4759]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g7-idtype0] ________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3085, 1.2705], [ 0.0701, -1.2509], [ 0.6480, -0.6933], [-0.1034, -0.0729], ...5920], [ 1.4725, 0.7000], [ 2.1536, -5.1331], [ 1.9001, -0.4455], [-2.9530, 3.3822]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv[2-False-False-left-g7-idtype1] ________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) norm = 'left', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv(idtype, g, norm, weight, bias, out_dim): # Test one tensor input g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) if weight: h_out = conv(g, h) else: > h_out = conv(g, h, weight=ext_w) tests\pytorch\test_nn.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2622, -0.1059], [-0.4994, 0.4790], [ 0.3466, -0.3163], [-0.2021, 0.3678], ...3892], [ 0.2216, -0.4702], [-0.1176, 0.4874], [ 0.0395, 0.2552], [-0.6466, -0.1235]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________ test_graph_conv_e_weight[1-True-True-none-g0-idtype0] ____________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.4877], [ 0.0529], [-0.3414], [ 1.2382], [-0.1430], [ 0.3928], [ 1.2473], [ 0.8224], [-0.9995], [ 0.4580]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________ test_graph_conv_e_weight[1-True-True-none-g0-idtype1] ____________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.3279], [-0.3252], [-0.8284], [-0.3574], [ 0.1058], [ 0.2615], [-0.0346], [ 0.1276], [ 0.3270], [ 0.0133]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________ test_graph_conv_e_weight[1-True-True-both-g0-idtype0] ____________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.3330], [ 1.1824], [-1.4395], [ 0.1145], [ 1.0097], [ 1.4174], [ 0.7604], [-0.7124], [ 1.0790], [-0.9422]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________ test_graph_conv_e_weight[1-True-True-both-g0-idtype1] ____________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.7415], [ 1.2812], [-0.9311], [-0.4101], [-0.0374], [ 0.2462], [ 1.0377], [ 0.7239], [ 2.0212], [ 0.3814]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-True-True-right-g0-idtype0] ____________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-1.5052], [ 1.3803], [-1.1570], [-1.1289], [-0.5888], [-1.0042], [ 0.8109], [ 0.2467], [ 1.1267], [ 0.0591]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-True-True-right-g0-idtype1] ____________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-1.0505], [ 0.0183], [ 0.0168], [ 0.8952], [-1.0532], [ 1.4961], [-1.0853], [ 0.6983], [-0.5974], [-0.1409]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-True-False-none-g0-idtype0] ____________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.7455], [-5.2148], [-1.7958], [ 4.3237], [ 2.0967], [-0.6206], [ 2.2373], [-5.3644], [ 2.3700], [-5.5190]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-True-False-none-g0-idtype1] ____________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-4.6659], [ 0.6509], [ 0.4283], [ 0.0513], [-5.1421], [-0.8258], [-1.4878], [ 0.8601], [ 1.3723], [-0.5258]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-True-False-both-g0-idtype0] ____________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-1.7335], [-2.2185], [ 0.7887], [ 0.0138], [-1.7915], [ 1.1615], [ 1.1113], [ 0.5282], [ 1.6656], [ 0.0082]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-True-False-both-g0-idtype1] ____________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 1.6404], [-1.2775], [-0.7442], [-0.2700], [-2.0586], [-1.2875], [-0.6615], [ 1.0923], [-1.3595], [-2.4035]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-True-False-right-g0-idtype0] ___________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 2.2432], [ 2.9197], [ 1.4772], [ 1.2968], [ 4.6916], [ 1.6431], [-2.4121], [ 5.2912], [-1.0122], [ 1.7813]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-True-False-right-g0-idtype1] ___________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.6073], [ 0.3081], [-0.1906], [ 0.0041], [ 0.5309], [-1.8029], [ 0.0913], [ 0.0575], [-0.6231], [-1.5754]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-False-True-none-g0-idtype0] ____________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.1043], [-0.0625], [-0.5570], [-0.3847], [ 0.2742], [ 0.1069], [-0.6647], [-0.5342], [-0.2113], [-0.6410]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-False-True-none-g0-idtype1] ____________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-7.6125e-01], [ 5.4765e-02], [-1.2756e+00], [ 1.2723e+00], [-1.4818e+00], ...], [-8.4316e-01], [ 1.3385e-03], [ 1.5277e+00], [-1.8785e-01]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-False-True-both-g0-idtype0] ____________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.5332], [-0.5773], [ 0.5188], [ 1.2162], [ 0.3919], [-1.8548], [-1.2726], [-1.0423], [-0.6045], [-2.6664]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-False-True-both-g0-idtype1] ____________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.2765], [ 0.9435], [ 0.0752], [-0.5624], [ 1.9305], [-0.1518], [-1.7705], [-0.5975], [-2.2535], [-1.5242]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-False-True-right-g0-idtype0] ___________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.4121], [-1.5410], [ 0.4339], [ 0.9566], [-1.8335], [ 0.4357], [ 0.2167], [ 1.7359], [ 1.5783], [ 0.7778]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-False-True-right-g0-idtype1] ___________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: > h_out = conv(g, h, edge_weight=e_w) tests\pytorch\test_nn.py:117: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.8243], [ 2.3516], [-1.1463], [-0.7258], [-0.9015], [ 0.7196], [ 1.7132], [ 0.0803], [ 0.3772], [-0.7832]], grad_fn=) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-False-False-none-g0-idtype0] ___________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.7385], [ 0.8706], [ 0.8200], [ 1.8062], [-3.5475], [-3.3429], [-2.2859], [-1.8538], [-0.0371], [ 0.0308]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-False-False-none-g0-idtype1] ___________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-5.4672], [-4.7355], [-0.8634], [-0.9149], [ 0.8999], [-1.5965], [-0.4160], [ 3.4646], [-2.0384], [-3.3900]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-False-False-both-g0-idtype0] ___________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.8375], [ 3.4503], [-1.0103], [ 2.1868], [-0.8784], [-1.6144], [ 1.7861], [ 3.9775], [-0.4818], [ 3.5261]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________ test_graph_conv_e_weight[1-False-False-both-g0-idtype1] ___________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.4770], [ 1.2469], [-0.9251], [-1.6392], [ 0.4527], [ 1.5740], [ 0.8871], [ 2.5137], [ 0.6273], [ 1.5126]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________ test_graph_conv_e_weight[1-False-False-right-g0-idtype0] ___________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-1.3293], [-0.1422], [ 1.0020], [ 0.7395], [ 0.0505], [ 0.3893], [-0.3602], [-0.3616], [-0.8091], [-0.1110]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
__________ test_graph_conv_e_weight: 23 further identical failures ____________

(Each of the following parametrizations fails with exactly the traceback above:
the same call chain through graphconv.py:423, heterograph.py:4895,
core.py:357/323 and ops\spmm.py:147/77 into gspmm, ending in the same
TypeError: empty_context() got an unexpected keyword argument 'enabled'
at python\dgl\backend\pytorch\sparse.py:720. Only the test ID and the random
lhs_data tensors differ.)

test_graph_conv_e_weight[1-False-False-right-g0-idtype1]
test_graph_conv_e_weight[2-True-True-none-g0-idtype0]
test_graph_conv_e_weight[2-True-True-none-g0-idtype1]
test_graph_conv_e_weight[2-True-True-both-g0-idtype0]
test_graph_conv_e_weight[2-True-True-both-g0-idtype1]
test_graph_conv_e_weight[2-True-True-right-g0-idtype0]
test_graph_conv_e_weight[2-True-True-right-g0-idtype1]
test_graph_conv_e_weight[2-True-False-none-g0-idtype0]
test_graph_conv_e_weight[2-True-False-none-g0-idtype1]
test_graph_conv_e_weight[2-True-False-both-g0-idtype0]
test_graph_conv_e_weight[2-True-False-both-g0-idtype1]
test_graph_conv_e_weight[2-True-False-right-g0-idtype0]
test_graph_conv_e_weight[2-True-False-right-g0-idtype1]
test_graph_conv_e_weight[2-False-True-none-g0-idtype0]
test_graph_conv_e_weight[2-False-True-none-g0-idtype1]
test_graph_conv_e_weight[2-False-True-both-g0-idtype0]
test_graph_conv_e_weight[2-False-True-both-g0-idtype1]
test_graph_conv_e_weight[2-False-True-right-g0-idtype0]
test_graph_conv_e_weight[2-False-True-right-g0-idtype1]
test_graph_conv_e_weight[2-False-False-none-g0-idtype0]
test_graph_conv_e_weight[2-False-False-none-g0-idtype1]
test_graph_conv_e_weight[2-False-False-both-g0-idtype0]
test_graph_conv_e_weight[2-False-False-both-g0-idtype1]
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________ test_graph_conv_e_weight[2-False-False-right-g0-idtype0] ___________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 3.7128, 2.3379], [ 1.9219, 1.2300], [-2.6823, -3.2687], [-1.0375, 0.9667], ...7087], [-1.5644, -2.3919], [ 1.4282, 0.3831], [ 2.6540, 0.4025], [-3.1763, -1.5815]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________ test_graph_conv_e_weight[2-False-False-right-g0-idtype1] ___________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) e_w = g.edata['scalar_w'] if weight: h_out = conv(g, h, edge_weight=e_w) else: > h_out = conv(g, h, weight=ext_w, edge_weight=e_w) tests\pytorch\test_nn.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-2.6783, -0.7400], [ 1.5034, -1.0513], [-0.8267, -2.6617], [-0.0163, 3.2188], ...2458], [-2.2590, 2.5459], [ 1.9112, -2.2476], [-2.1824, 3.0716], [ 4.2991, -1.5237]]) rhs_data = tensor([[0.2636], [0.6412], [0.6275], [1.5607], [1.2491], [0.7272], [0... [1.4196], [2.5846], [0.5729], [1.4330], [0.6490], [0.3398], [0.1452]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[1-True-True-none-g0-idtype0] __________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) norm_weight = edgenorm(g, g.edata['scalar_w']) if weight: > h_out = conv(g, h, edge_weight=norm_weight) tests\pytorch\test_nn.py:142: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.3693], [ 0.7781], [-1.8341], [-0.2539], [ 0.1112], [ 1.1728], [-0.4980], [-0.5293], [ 1.2855], [-0.7784]], grad_fn=) rhs_data = tensor([[0.6491], [1.8020], [0.0416], [3.2244], [0.1902], [0.6207], [1... [0.6860], [0.3175], [2.0501], [0.8401], [2.2833], [1.1770], [0.2148]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
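Every failure above and below reduces to the same root cause: on this Windows Python 3.6 worker, the `autocast` symbol used at python\dgl\backend\pytorch\sparse.py:720 is evidently bound to a no-argument fallback named `empty_context`, so the call `autocast(enabled=False)` raises before any SpMM kernel runs. A minimal reproduction of that failure mode, with `empty_context` below as a hypothetical stand-in rather than DGL's actual source:

    # Hypothetical stand-in for the fallback context manager: a zero-argument
    # @contextmanager cannot absorb the enabled= keyword the call site passes.
    from contextlib import contextmanager

    @contextmanager
    def empty_context():
        yield

    with empty_context(enabled=False):   # raises TypeError: empty_context() got
        pass                             # an unexpected keyword argument 'enabled'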
_________ test_graph_conv_e_weight_norm[1-True-True-none-g0-idtype1] __________
idtype = torch.int64, norm = 'none', weight = True, bias = True, out_dim = 1
>           h_out = conv(g, h, edge_weight=norm_weight)
tests\pytorch\test_nn.py:142: [test source and call stack identical to the first test_graph_conv_e_weight_norm failure above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-True-True-both-g0-idtype0] __________
idtype = torch.int32, norm = 'both', weight = True, bias = True, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: via python\dgl\nn\pytorch\conv\graphconv.py:117, reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')), and python\dgl\ops\spmm.py:191 [op = 'copy_rhs', reduce_op = 'sum', lhs_data = None; remaining stack identical to the failures above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-True-True-both-g0-idtype1] __________
idtype = torch.int64, norm = 'both', weight = True, bias = True, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: [same 'both'-norm path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-True-True-right-g0-idtype0] _________
idtype = torch.int32, norm = 'right', weight = True, bias = True, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: via python\dgl\nn\pytorch\conv\graphconv.py:123, graph.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'in_weight')), and python\dgl\ops\spmm.py:191 [op = 'copy_rhs', reduce_op = 'sum', lhs_data = None]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-True-True-right-g0-idtype1] _________
idtype = torch.int64, norm = 'right', weight = True, bias = True, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: [same 'right'-norm path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-True-False-none-g0-idtype0] _________
idtype = torch.int32, norm = 'none', weight = False, bias = True, out_dim = 1
>           h_out = conv(g, h, weight=ext_w, edge_weight=norm_weight)
tests\pytorch\test_nn.py:144: [same 'mul' path as the first test_graph_conv_e_weight_norm failure]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-True-False-none-g0-idtype1] _________
idtype = torch.int64, norm = 'none', weight = False, bias = True, out_dim = 1
>           h_out = conv(g, h, weight=ext_w, edge_weight=norm_weight)
tests\pytorch\test_nn.py:144: [same 'mul' path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-True-False-both-g0-idtype0] _________
idtype = torch.int32, norm = 'both', weight = False, bias = True, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: [same 'both'-norm path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-True-False-both-g0-idtype1] _________
idtype = torch.int64, norm = 'both', weight = False, bias = True, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: [same 'both'-norm path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

________ test_graph_conv_e_weight_norm[1-True-False-right-g0-idtype0] _________
idtype = torch.int32, norm = 'right', weight = False, bias = True, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: [same 'right'-norm path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
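The call site could be made version-proof by giving the fallback the same keyword surface as torch's real autocast context manager. The following is only a hedged sketch of such a shim, not the patch under test here; `_fallback_autocast` is a hypothetical name:

    # Sketch of a compatible fallback: accept and ignore enabled= so that
    # `with autocast(enabled=False):` works with or without AMP support.
    from contextlib import contextmanager

    try:
        from torch.cuda.amp import autocast    # real AMP manager, takes enabled=
    except ImportError:
        @contextmanager
        def _fallback_autocast(enabled=True):  # hypothetical name; swallows the flag
            yield
        autocast = _fallback_autocast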
________ test_graph_conv_e_weight_norm[1-True-False-right-g0-idtype1] _________
idtype = torch.int64, norm = 'right', weight = False, bias = True, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: [same 'right'-norm path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-False-True-none-g0-idtype0] _________
idtype = torch.int32, norm = 'none', weight = True, bias = False, out_dim = 1
>           h_out = conv(g, h, edge_weight=norm_weight)
tests\pytorch\test_nn.py:142: [same 'mul' path as the first test_graph_conv_e_weight_norm failure]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-False-True-none-g0-idtype1] _________
idtype = torch.int64, norm = 'none', weight = True, bias = False, out_dim = 1
>           h_out = conv(g, h, edge_weight=norm_weight)
tests\pytorch\test_nn.py:142: [same 'mul' path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-False-True-both-g0-idtype0] _________
idtype = torch.int32, norm = 'both', weight = True, bias = False, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: [same 'both'-norm path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________ test_graph_conv_e_weight_norm[1-False-True-both-g0-idtype1] _________
idtype = torch.int64, norm = 'both', weight = True, bias = False, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: [same 'both'-norm path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
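For reference, the gspmm preamble repeated in every traceback rewrites 'sub' into 'add' on a negated rhs and 'div' into 'mul' on a reciprocal rhs before dispatching, so only canonical ops reach the failing autocast line; in these tests those are 'mul' (feature times edge weight) and 'copy_rhs' (degree accumulation inside EdgeWeightNorm). The identity the rewrite relies on, checked in plain torch with no DGL needed:

    # Quick sanity check of the algebraic rewrite used by gspmm's preamble.
    import torch

    lhs = torch.randn(4)
    rhs = torch.rand(4) + 0.1              # stay away from zero before dividing
    assert torch.allclose(lhs - rhs, lhs + (-rhs))
    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))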
________ test_graph_conv_e_weight_norm[1-False-True-right-g0-idtype0] _________
idtype = torch.int32, norm = 'right', weight = True, bias = False, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: [same 'right'-norm path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

________ test_graph_conv_e_weight_norm[1-False-True-right-g0-idtype1] _________
idtype = torch.int64, norm = 'right', weight = True, bias = False, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: [same 'right'-norm path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

________ test_graph_conv_e_weight_norm[1-False-False-none-g0-idtype0] _________
idtype = torch.int32, norm = 'none', weight = False, bias = False, out_dim = 1
>           h_out = conv(g, h, weight=ext_w, edge_weight=norm_weight)
tests\pytorch\test_nn.py:144: [same 'mul' path as the first test_graph_conv_e_weight_norm failure]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

________ test_graph_conv_e_weight_norm[1-False-False-none-g0-idtype1] _________
idtype = torch.int64, norm = 'none', weight = False, bias = False, out_dim = 1
>           h_out = conv(g, h, weight=ext_w, edge_weight=norm_weight)
tests\pytorch\test_nn.py:144: [same 'mul' path as above]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

________ test_graph_conv_e_weight_norm[1-False-False-both-g0-idtype0] _________
idtype = torch.int32, norm = 'both', weight = False, bias = False, out_dim = 1
>           norm_weight = edgenorm(g, g.edata['scalar_w'])
tests\pytorch\test_nn.py:140: via python\dgl\nn\pytorch\conv\graphconv.py:117 and python\dgl\ops\spmm.py:191

gidx = [HeteroGraphIndex; angle-bracketed repr lost in the captured log]
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025,
                   1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148])

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________ test_graph_conv_e_weight_norm[1-False-False-both-g0-idtype1] _________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________ test_graph_conv_e_weight_norm[1-False-False-right-g0-idtype0] ________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:123: in forward graph.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'in_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________ test_graph_conv_e_weight_norm[1-False-False-right-g0-idtype1] ________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:123: in forward graph.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'in_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-True-True-none-g0-idtype0] __________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) norm_weight = edgenorm(g, g.edata['scalar_w']) if weight: > h_out = conv(g, h, edge_weight=norm_weight) tests\pytorch\test_nn.py:142: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.6632, -0.5378], [ 2.6249, 2.1222], [-0.0746, 1.8017], [ 2.2846, 1.3546], ... -1.6471], [-0.9610, -0.2798], [-0.2984, -2.8773], [ 0.9076, -1.1007]], grad_fn=) rhs_data = tensor([[0.6491], [1.8020], [0.0416], [3.2244], [0.1902], [0.6207], [1... [0.6860], [0.3175], [2.0501], [0.8401], [2.2833], [1.1770], [0.2148]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-True-True-none-g0-idtype1] __________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) norm_weight = edgenorm(g, g.edata['scalar_w']) if weight: > h_out = conv(g, h, edge_weight=norm_weight) tests\pytorch\test_nn.py:142: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.6765, -0.1248], [-0.1027, 1.0637], [ 0.4872, -0.0853], [ 0.6262, 0.3190], ... -0.1995], [-0.3957, 0.0632], [-0.4042, -0.2038], [ 0.5874, -0.5352]], grad_fn=) rhs_data = tensor([[0.6491], [1.8020], [0.0416], [3.2244], [0.1902], [0.6207], [1... [0.6860], [0.3175], [2.0501], [0.8401], [2.2833], [1.1770], [0.2148]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-True-True-both-g0-idtype0] __________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-True-True-both-g0-idtype1] __________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-True-True-right-g0-idtype0] _________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:123: in forward graph.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'in_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-True-True-right-g0-idtype1] _________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:123: in forward graph.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'in_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-True-False-none-g0-idtype0] _________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) norm_weight = edgenorm(g, g.edata['scalar_w']) if weight: h_out = conv(g, h, edge_weight=norm_weight) else: > h_out = conv(g, h, weight=ext_w, edge_weight=norm_weight) tests\pytorch\test_nn.py:144: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-1.8320, -2.9323], [ 1.0977, 0.8555], [ 1.4111, 0.6233], [-0.9969, 0.0581], ...3791], [-0.9958, -1.1627], [-1.4568, 0.0343], [-1.2527, -0.9189], [-0.2323, 0.4465]]) rhs_data = tensor([[0.6491], [1.8020], [0.0416], [3.2244], [0.1902], [0.6207], [1... [0.6860], [0.3175], [2.0501], [0.8401], [2.2833], [1.1770], [0.2148]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-True-False-none-g0-idtype1] _________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) norm_weight = edgenorm(g, g.edata['scalar_w']) if weight: h_out = conv(g, h, edge_weight=norm_weight) else: > h_out = conv(g, h, weight=ext_w, edge_weight=norm_weight) tests\pytorch\test_nn.py:144: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 3.5334, -2.9222], [ 0.0948, -1.5191], [-0.1692, 0.9799], [-2.5485, 1.2069], ...3509], [-2.6636, 1.6560], [-0.7663, 3.8942], [ 1.3865, -3.3800], [-1.9038, 3.9313]]) rhs_data = tensor([[0.6491], [1.8020], [0.0416], [3.2244], [0.1902], [0.6207], [1... [0.6860], [0.3175], [2.0501], [0.8401], [2.2833], [1.1770], [0.2148]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-True-False-both-g0-idtype0] _________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-True-False-both-g0-idtype1] _________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________ test_graph_conv_e_weight_norm[2-True-False-right-g0-idtype0] _________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:123: in forward graph.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'in_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________ test_graph_conv_e_weight_norm[2-True-False-right-g0-idtype1] _________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:123: in forward graph.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'in_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-False-True-none-g0-idtype0] _________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) norm_weight = edgenorm(g, g.edata['scalar_w']) if weight: > h_out = conv(g, h, edge_weight=norm_weight) tests\pytorch\test_nn.py:142: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.8804, 0.9705], [-0.5709, 0.0417], [-0.4043, 0.4932], [-1.0464, -0.6110], ... 0.8335], [-1.9237, 2.2635], [ 1.5175, -1.8448], [ 1.2880, -0.9630]], grad_fn=) rhs_data = tensor([[0.6491], [1.8020], [0.0416], [3.2244], [0.1902], [0.6207], [1... [0.6860], [0.3175], [2.0501], [0.8401], [2.2833], [1.1770], [0.2148]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-False-True-none-g0-idtype1] _________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'none', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) norm_weight = edgenorm(g, g.edata['scalar_w']) if weight: > h_out = conv(g, h, edge_weight=norm_weight) tests\pytorch\test_nn.py:142: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.1116, -0.9953], [ 0.1379, -0.7153], [ 0.4194, 1.2042], [ 0.4357, -0.7326], ... 0.1649], [-0.7040, -0.0801], [-1.7342, 0.7475], [-0.0522, 0.5195]], grad_fn=) rhs_data = tensor([[0.6491], [1.8020], [0.0416], [3.2244], [0.1902], [0.6207], [1... [0.6860], [0.3175], [2.0501], [0.8401], [2.2833], [1.1770], [0.2148]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-False-True-both-g0-idtype0] _________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________ test_graph_conv_e_weight_norm[2-False-True-both-g0-idtype1] _________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'both', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________ test_graph_conv_e_weight_norm[2-False-True-right-g0-idtype0] _________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...rch.float32), '_edge_weight': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) norm = 'right', weight = True, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim): g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) edgenorm = nn.EdgeWeightNorm(norm=norm) > norm_weight = edgenorm(g, g.edata['scalar_w']) tests\pytorch\test_nn.py:140: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:123: in forward graph.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'in_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([0.6491, 1.8020, 0.0416, 3.2244, 0.1902, 0.6207, 1.4090, 0.3025, 1.7357, 0.6755, 0.6860, 0.3175, 2.0501, 0.8401, 2.2833, 1.1770, 0.2148]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
FAILED tests\pytorch\test_nn.py::test_graph_conv_e_weight_norm[2-False-True-right-g0-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_e_weight_norm[2-False-True-right-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_e_weight_norm[2-False-False-none-g0-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_e_weight_norm[2-False-False-none-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_e_weight_norm[2-False-False-both-g0-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_e_weight_norm[2-False-False-both-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_e_weight_norm[2-False-False-right-g0-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_e_weight_norm[2-False-False-right-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-none-g0-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-none-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-none-g1-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-none-g1-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-both-g0-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-both-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-both-g1-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-both-g1-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-right-g0-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-right-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-right-g1-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-True-right-g1-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-False-none-g0-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-False-none-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-False-none-g1-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-False-none-g1-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_graph_conv_bi[1-True-False-both-g0-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-True-False-both-g0-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7553], [ 0.9498]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-True-False-both-g1-idtype0] _______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.1192], [ 0.3118], [-0.9258], [ 0.4978], [-0.3038], [ 0.8208]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-True-False-both-g1-idtype1] _______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 6.6321], [-0.4705], [-2.7860], [ 0.7577], [ 2.6282], [ 0.7754]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-True-False-right-g0-idtype0] ______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 3.0247], [-1.0844]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-True-False-right-g0-idtype1] ______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.3362], [-0.3181]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-True-False-right-g1-idtype0] ______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.0132], [ 0.5334], [-5.4003], [-0.1352], [ 0.6653], [-0.5733]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-True-False-right-g1-idtype1] ______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = True, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1734], [-2.4628], [-0.2079], [-2.8298], [ 1.3201], [-0.3578]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-none-g0-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2656], [ 0.0528]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-none-g0-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.9401], [-1.0459]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-none-g1-idtype0] _______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5821], [ 2.1218], [ 0.2685], [ 1.2784], [-0.4486], [ 0.7418]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-none-g1-idtype1] _______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2911], [ 1.1131], [-1.0148], [-1.0035], [ 2.1734], [ 1.1257]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-both-g0-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.6304], [ 0.6561]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-both-g0-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.6441], [0.5059]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-both-g1-idtype0] _______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.8914], [ 0.6981], [-0.6616], [-1.3130], [ 0.9533], [ 0.9548]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-both-g1-idtype1] _______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.0274], [-1.8962], [-1.3701], [-1.1161], [ 2.3365], [-0.8380]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-right-g0-idtype0] ______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4212], [-0.9779]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-right-g0-idtype1] ______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[1.1444], [0.8866]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-right-g1-idtype0] ______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.0315], [-0.3794], [-0.1204], [-1.6265], [ 0.5929], [ 1.9186]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-True-right-g1-idtype1] ______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = True, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.4248], [-1.2117], [-1.2799], [-0.1726], [ 0.1885], [-0.0256]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-False-none-g0-idtype0] ______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.6835], [10.3995]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-False-none-g0-idtype1] ______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.4053], [ 0.9820]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-False-none-g1-idtype0] ______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.2426], [ 2.1040], [ 3.4066], [ 1.9417], [ 1.3830], [ 0.2790]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-False-none-g1-idtype1] ______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.4491], [-1.8606], [ 2.4502], [ 0.3883], [ 0.2064], [-1.0713]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-False-both-g0-idtype0] ______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.8584], [ 3.1270]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-False-both-g0-idtype1] ______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3032], [ 0.2018]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-False-both-g1-idtype0] ______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4774], [-1.6989], [ 0.4459], [ 1.2344], [ 1.5566], [-1.2642]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[1-False-False-both-g1-idtype1] ______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.8424], [-1.3221], [ 1.0961], [-0.1399], [ 0.4930], [-2.1929]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________ test_graph_conv_bi[1-False-False-right-g0-idtype0] ______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.1409], [5.0546]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________ test_graph_conv_bi[1-False-False-right-g0-idtype1] ______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.1464], [-0.3391]]), rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________ test_graph_conv_bi[1-False-False-right-g1-idtype0] ______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0038], [ 1.9149], [-0.5750], [ 2.8045], [ 2.7590], [ 0.1439]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________ test_graph_conv_bi[1-False-False-right-g1-idtype1] ______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = False, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.5053], [-0.2679], [ 0.4639], [ 1.3614], [-0.8184], [ 1.0998]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv_bi[2-True-True-none-g0-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0930, 0.2305], [-2.0785, -1.7662]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv_bi[2-True-True-none-g0-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.8775, -1.6434], [ 0.8330, -1.3285]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv_bi[2-True-True-none-g1-idtype0] _______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-4.1399e-01, -2.2294e+00], [-1.9270e-03, -2.0184e+00], [ 8.0488e-01, 2.3826e-01], [ 9.4166e-01, 1.3730e+00], [ 4.4610e-01, 2.0979e+00], [-5.2363e-01, -5.9245e-02]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv_bi[2-True-True-none-g1-idtype1] _______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.6771e+00, 5.5007e-01], [-8.4047e-01, -5.5681e-01], [-1.1456e+00, 4.1625e-04], [-7.0751e-01, -2.2716e-01], [-2.3134e+00, 2.7290e+00], [ 2.6249e-01, -1.1318e+00]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv_bi[2-True-True-both-g0-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.9775, -0.6751], [-0.9400, -0.4242]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv_bi[2-True-True-both-g0-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1069, -0.4628], [ 0.2631, -0.8641]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv_bi[2-True-True-both-g1-idtype0] _______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.1465, -1.9445], [-0.4044, 1.4554], [ 0.0235, 0.4235], [ 1.2292, 1.6411], [ 3.4319, -0.4370], [-0.0541, 0.7545]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________ test_graph_conv_bi[2-True-True-both-g1-idtype1] _______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8098, -1.7640], [-0.1075, 0.7422], [ 0.1020, -0.6448], [-1.3473, -2.6438], [ 0.4806, 1.2444], [-0.8714, -1.7638]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[2-True-True-right-g0-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.8104, -1.8433], [-1.2139, 0.8408]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[2-True-True-right-g0-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.8547, -1.9630], [-1.2771, 1.1714]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[2-True-True-right-g1-idtype0] _______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.2864, -1.4859], [-0.8474, -1.1991], [ 0.9095, -0.7096], [-0.6140, 0.0676], [-0.6512, 1.6396], [ 0.9444, -0.6784]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[2-True-True-right-g1-idtype1] _______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = True, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: > h_out = conv(g, (h, h_dst)) tests\pytorch\test_nn.py:167: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.3514, 1.9938], [-0.1824, -0.8924], [ 0.5439, -0.6511], [ 2.4835, -2.1830], [ 0.5989, 1.1135], [-2.7976, 2.6334]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[2-True-False-none-g0-idtype0] _______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.0237, -1.8177], [ 4.5065, -0.8349]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[2-True-False-none-g0-idtype1] _______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'none', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6596, -0.2684], [ 2.0552, 0.7941]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[2-True-False-none-g1-idtype0] _______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'none', weight = False, bias = True, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2156, 3.3238], [ 0.2938, -2.3145], [ 0.7559, -1.2751], [-1.2290, 2.2463], [ 0.0164, 1.7052], [-1.9956, 4.5691]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
______________ test_graph_conv_bi[2-True-False-none-g1-idtype1] _______________

idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'none', weight = False, bias = True, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.6501, 2.1923], [-5.3344, -4.8047], [-1.6959, 0.7860], [ 2.6615, 2.1679], [-3.0327, -2.2036], [-0.1518, -0.4141]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-True-False-both-g0-idtype0] _______________

idtype = torch.int32
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'both', weight = False, bias = True, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.0053, 0.6787], [-0.7160, -0.4141]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-True-False-both-g0-idtype1] _______________

idtype = torch.int64
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'both', weight = False, bias = True, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.1294, -1.4264], [-0.4919, -1.0274]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-True-False-both-g1-idtype0] _______________

idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'both', weight = False, bias = True, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-2.0916, 0.8384], [-0.5047, -1.8220], [-2.7802, 0.1383], [-0.6142, 1.0951], [ 3.2490, -0.9822], [-2.6834, 0.9883]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-True-False-both-g1-idtype1] _______________

idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'both', weight = False, bias = True, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.5246, -1.2162], [-1.3252, -0.7198], [ 0.7371, 3.0659], [ 2.7491, 0.3246], [ 0.8689, 2.4123], [ 0.7081, -4.2143]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-True-False-right-g0-idtype0] ______________

idtype = torch.int32
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'right', weight = False, bias = True, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.8338, 1.1383], [-1.9977, -2.8742]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-True-False-right-g0-idtype1] ______________

idtype = torch.int64
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'right', weight = False, bias = True, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-3.0286, -1.9180], [ 1.3014, 2.1747]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-True-False-right-g1-idtype0] ______________

idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'right', weight = False, bias = True, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.7682, -2.0738], [ 1.1994, 1.6682], [ 1.3137, 1.6035], [ 0.7510, -0.5819], [-0.4705, -1.7249], [-0.6571, 3.6041]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-True-False-right-g1-idtype1] ______________

idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'right', weight = False, bias = True, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 2.2966, -4.1993], [-0.2066, -2.5190], [ 0.9365, 3.0353], [-1.1778, -3.0554], [ 1.3121, -1.3380], [-0.0823, 1.8038]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
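An aside on the gspmm source that pytest echoes in every report above: before dispatching to the kernel it canonicalizes 'sub' to 'add' by negating rhs_data, and 'div' to 'mul' by taking its reciprocal, so only the additive and multiplicative kernels are needed. The rewrite rests on the ordinary elementwise identities; a standalone check (not DGL code, with divisors chosen as powers of two so the reciprocal is exact):

    import torch

    a = torch.tensor([4., 9.])
    b = torch.tensor([2., 4.])
    assert torch.equal(a - b, a + (-b))        # 'sub' as 'add' with -rhs_data
    assert torch.equal(a / b, a * (1. / b))    # 'div' as 'mul' with 1. / rhs_data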
______________ test_graph_conv_bi[2-False-True-none-g0-idtype0] _______________

idtype = torch.int32
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'none', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.8490, -1.5220], [-1.5899, 0.6255]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-True-none-g0-idtype1] _______________

idtype = torch.int64
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'none', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[0.8833, 0.1405], [0.7712, 0.7087]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-True-none-g1-idtype0] _______________

idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'none', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.0339, -0.1905], [ 1.1707, -0.6298], [ 2.6241, -0.6557], [ 2.4237, -0.6214], [-2.5278, 1.8460], [-1.5449, 0.9779]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-True-none-g1-idtype1] _______________

idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'none', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.7071, -0.8390], [ 2.1976, -0.2198], [-1.6608, -0.5138], [ 0.5952, 0.6917], [-0.4227, -1.8006], [ 1.7586, 0.6120]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-True-both-g0-idtype0] _______________

idtype = torch.int32
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'both', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.4692, 0.9583], [-0.1725, 0.5508]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-True-both-g0-idtype1] _______________

idtype = torch.int64
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'both', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.1676, -1.1259], [ 0.3591, 0.7361]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-True-both-g1-idtype0] _______________

idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'both', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.1212, -0.4925], [ 0.9728, 1.5921], [-1.6108, 2.4999], [-0.5508, -0.0093], [-0.2067, -0.4359], [ 1.1792, 0.4420]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-True-both-g1-idtype1] _______________

idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'both', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.1557, 0.2599], [-0.2249, -0.3826], [-0.9367, -1.2368], [ 0.8575, 0.9160], [ 1.6676, 1.8103], [ 0.2975, 0.6746]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-True-right-g0-idtype0] ______________

idtype = torch.int32
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'right', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.6120, -0.3789], [-0.2921, 2.8216]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-True-right-g0-idtype1] ______________

idtype = torch.int64
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'right', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.5314, 0.0304], [-0.5313, -2.2308]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-True-right-g1-idtype0] ______________

idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'right', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-2.7557, -1.0420], [-1.6258, -2.3598], [ 0.0874, 0.9857], [ 1.4396, 0.6630], [ 1.1299, 1.5311], [-0.2805, 1.1451]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-True-right-g1-idtype1] ______________

idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'right', weight = True, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst))
tests\pytorch\test_nn.py:167:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.7852, 0.1437], [-0.0886, -0.9989], [ 0.9741, -0.3672], [-0.4484, -0.0274], [ 1.8418, -0.2340], [ 0.0712, 0.7711]], grad_fn=)
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-False-none-g0-idtype0] ______________

idtype = torch.int32
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'none', weight = False, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-2.1033, 3.0279], [ 0.0574, 1.9923]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-False-none-g0-idtype1] ______________

idtype = torch.int64
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'none', weight = False, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.5429, -1.5642], [ 3.7061, 1.0933]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-False-none-g1-idtype0] ______________

idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'none', weight = False, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.6038, 1.2058], [-0.3917, 1.0704], [ 0.5374, 0.2239], [-0.1427, -1.2547], [ 0.8091, -1.2874], [-1.3453, 0.2471]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-False-none-g1-idtype1] ______________

idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
norm = 'none', weight = False, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.0483, 1.4158], [ 2.3433, -0.7039], [ 0.2960, 0.2901], [-0.0928, 0.7765], [-0.1737, 0.3759], [ 1.3935, -1.5423]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________ test_graph_conv_bi[2-False-False-both-g0-idtype0] ______________

idtype = torch.int32
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
norm = 'both', weight = False, bias = False, out_dim = 2

>           h_out = conv(g, (h, h_dst), weight=ext_w)
tests\pytorch\test_nn.py:169:

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.0818, 0.3127], [-0.8253, -1.2334]])
rhs_data = None
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[2-False-False-both-g0-idtype1] ______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'both', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.2622, -0.6107], [-0.8719, -0.7810]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[2-False-False-both-g1-idtype0] ______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.8052, -0.4997], [ 4.9514, 0.7642], [-2.9086, -0.9738], [ 1.2751, -2.7894], [-1.1706, -0.5089], [-5.1705, -5.3062]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________ test_graph_conv_bi[2-False-False-both-g1-idtype1] ______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'both', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.2583, 1.7689], [ 2.7066, -4.8905], [-1.5381, 2.0689], [-1.6922, 0.7714], [-1.7100, 0.7687], [-0.8533, 0.5723]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________ test_graph_conv_bi[2-False-False-right-g0-idtype0] ______________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 4.5708, -5.4147], [-0.9800, 0.9646]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________ test_graph_conv_bi[2-False-False-right-g0-idtype1] ______________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.4051, 1.3072], [-0.8023, -0.3428]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________ test_graph_conv_bi[2-False-False-right-g1-idtype0] ______________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3338, -1.4458], [ 2.0507, -1.7456], [ 3.2306, -4.2193], [ 0.2923, 0.9065], [-3.6755, -2.4477], [-0.4563, -1.0939]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________ test_graph_conv_bi[2-False-False-right-g1-idtype1] ______________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) norm = 'right', weight = False, bias = False, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph'])) @pytest.mark.parametrize('norm', ['none', 'both', 'right']) @pytest.mark.parametrize('weight', [True, False]) @pytest.mark.parametrize('bias', [True, False]) @pytest.mark.parametrize('out_dim', [1, 2]) def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim): # Test a pair of tensor inputs g = g.astype(idtype).to(F.ctx()) conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx()) # test pickle th.save(conv, tmp_buffer) ext_w = F.randn((5, out_dim)).to(F.ctx()) nsrc = g.number_of_src_nodes() ndst = g.number_of_dst_nodes() h = F.randn((nsrc, 5)).to(F.ctx()) h_dst = F.randn((ndst, out_dim)).to(F.ctx()) if weight: h_out = conv(g, (h, h_dst)) else: > h_out = conv(g, (h, h_dst), weight=ext_w) tests\pytorch\test_nn.py:169: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7275, -0.8995], [-1.5300, -1.6453], [ 0.5779, 1.8393], [ 0.2549, -0.2081], [-0.1928, -1.0716], [-0.5187, 0.1314]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________________ test_tagconv[1] _______________________________ out_dim = 1 @pytest.mark.parametrize('out_dim', [1, 2]) def test_tagconv(out_dim): g = dgl.DGLGraph(nx.path_graph(3)) g = g.to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx) norm = th.pow(g.in_degrees().float(), -0.5) conv = nn.TAGConv(5, out_dim, bias=True) conv = conv.to(ctx) print(conv) # test pickle th.save(conv, tmp_buffer) # test#1: basic h0 = F.ones((3, 5)) > h1 = conv(g, h0) tests\pytorch\test_nn.py:201: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\tagconv.py:138: in forward fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[1.0000, 1.0000, 1.0000, 1.0000, 1.0000], [0.7071, 0.7071, 0.7071, 0.7071, 0.7071], [1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ---------------------------- Captured stdout call ----------------------------- TAGConv( (lin): Linear(in_features=15, out_features=1, bias=True) ) _______________________________ test_tagconv[2] _______________________________ out_dim = 2 @pytest.mark.parametrize('out_dim', [1, 2]) def test_tagconv(out_dim): g = dgl.DGLGraph(nx.path_graph(3)) g = g.to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx) norm = th.pow(g.in_degrees().float(), -0.5) conv = nn.TAGConv(5, out_dim, bias=True) conv = conv.to(ctx) print(conv) # test pickle th.save(conv, tmp_buffer) # test#1: basic h0 = F.ones((3, 5)) > h1 = conv(g, h0) tests\pytorch\test_nn.py:201: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\tagconv.py:138: in forward fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[1.0000, 1.0000, 1.0000, 1.0000, 1.0000], [0.7071, 0.7071, 0.7071, 0.7071, 0.7071], [1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
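# ----------------------------------------------------------------------------
# Sketch: the test_set2set / test_glob_att_pool / test_simple_pool failures
# just below enter through segment_reduce (sparse.py:780) rather than gspmm,
# but die on the identical autocast(enabled=False) call. One hedged repair on
# the fallback side -- an assumption, not necessarily what this PR does --
# is to let the no-op context accept and ignore autocast's keywords:
from contextlib import contextmanager

@contextmanager
def empty_context(*args, **kwargs):   # tolerates enabled=..., dtype=..., etc.
    yield

with empty_context(enabled=False):    # no longer raises
    pass
# ----------------------------------------------------------------------------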
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ---------------------------- Captured stdout call ----------------------------- TAGConv( (lin): Linear(in_features=15, out_features=2, bias=True) ) ________________________________ test_set2set _________________________________ def test_set2set(): ctx = F.ctx() g = dgl.DGLGraph(nx.path_graph(10)) g = g.to(F.ctx()) s2s = nn.Set2Set(5, 3, 3) # hidden size 5, 3 iters, 3 layers s2s = s2s.to(ctx) print(s2s) # test#1: basic h0 = F.randn((g.number_of_nodes(), 5)) > h1 = s2s(g, h0) tests\pytorch\test_nn.py:234: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\glob.py:587: in forward alpha = softmax_nodes(graph, 'e') python\dgl\readout.py:284: in softmax_nodes return segment.segment_softmax(graph.batch_num_nodes(ntype), x) python\dgl\ops\segment.py:98: in segment_softmax value_max = segment_reduce(seglen, value, reducer='max') python\dgl\ops\segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'max' x = tensor([[-0.3325], [ 0.3624], [-0.1815], [-0.0951], [-0.3081], [-0.4183], [ 0.2572], [-0.0201], [-0.0996], [ 0.3648]], grad_fn=) offsets = tensor([ 0, 10]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:780: TypeError ---------------------------- Captured stdout call ----------------------------- Set2Set( n_iters=3 (lstm): LSTM(10, 5, num_layers=3) ) _____________________________ test_glob_att_pool ______________________________ def test_glob_att_pool(): ctx = F.ctx() g = dgl.DGLGraph(nx.path_graph(10)) g = g.to(F.ctx()) gap = nn.GlobalAttentionPooling(th.nn.Linear(5, 1), th.nn.Linear(5, 10)) gap = gap.to(ctx) print(gap) # test pickle th.save(gap, tmp_buffer) # test#1: basic h0 = F.randn((g.number_of_nodes(), 5)) > h1 = gap(g, h0) tests\pytorch\test_nn.py:259: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\glob.py:452: in forward gate = softmax_nodes(graph, 'gate') python\dgl\readout.py:284: in softmax_nodes return segment.segment_softmax(graph.batch_num_nodes(ntype), x) python\dgl\ops\segment.py:98: in segment_softmax value_max = segment_reduce(seglen, value, reducer='max') python\dgl\ops\segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'max' x = tensor([[0.5741], [0.0933], [0.2595], [0.6300], [0.8454], [0.2226], [0.3971], [0.1155], [0.0340], [0.3834]], grad_fn=) offsets = tensor([ 0, 10]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:780: TypeError ---------------------------- Captured stdout call 
----------------------------- GlobalAttentionPooling( (gate_nn): Linear(in_features=5, out_features=1, bias=True) (feat_nn): Linear(in_features=5, out_features=10, bias=True) ) ______________________________ test_simple_pool _______________________________ def test_simple_pool(): ctx = F.ctx() g = dgl.DGLGraph(nx.path_graph(15)) g = g.to(F.ctx()) sum_pool = nn.SumPooling() avg_pool = nn.AvgPooling() max_pool = nn.MaxPooling() sort_pool = nn.SortPooling(10) # k = 10 print(sum_pool, avg_pool, max_pool, sort_pool) # test#1: basic h0 = F.randn((g.number_of_nodes(), 5)) sum_pool = sum_pool.to(ctx) avg_pool = avg_pool.to(ctx) max_pool = max_pool.to(ctx) sort_pool = sort_pool.to(ctx) > h1 = sum_pool(g, h0) tests\pytorch\test_nn.py:285: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\glob.py:94: in forward readout = sum_nodes(graph, 'h') python\dgl\readout.py:180: in sum_nodes return readout_nodes(graph, feat, weight, ntype=ntype, op='sum') python\dgl\readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python\dgl\ops\segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'sum' x = tensor([[-0.1293, -2.0777, -0.0795, -0.9244, 0.4134], [ 0.9438, -0.3004, 1.1719, 0.0088, 0.7673], ...0.3537], [ 0.2018, 0.1386, 0.7063, -0.7236, -0.6151], [ 0.3169, 0.4938, -1.0419, -0.9172, 0.1263]]) offsets = tensor([ 0, 15]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:780: TypeError ---------------------------- Captured stdout call ----------------------------- SumPooling() AvgPooling() MaxPooling() SortPooling() ____________________________ test_rgcn[1-idtype0] _____________________________ idtype = torch.int32, O = 1 @parametrize_idtype @pytest.mark.parametrize('O', [1, 8, 32]) def test_rgcn(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) B = 2 I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % B == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:389: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata 
= invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-1.2240], [-0.6973], [ 0.1714], [ 0.7233], [ 0.7863], [ 0.0979], ... [ 0.0148], [ 0.5405], [ 0.1714], [-0.7942], [ 0.6637]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________________ test_rgcn[1-idtype1] _____________________________ idtype = torch.int64, O = 1 @parametrize_idtype @pytest.mark.parametrize('O', [1, 8, 32]) def test_rgcn(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) B = 2 I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % B == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:389: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 0.0338], [ 0.4640], [-0.8780], [ 0.0303], [ 0.6427], [-0.2076], ... [-0.1495], [-1.0149], [ 0.4825], [ 0.1272], [ 0.0723]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
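# ----------------------------------------------------------------------------
# Sketch: for context on the _cast_if_autocast_enabled(...) call that
# immediately precedes the failing with-statement in every traceback, a
# hedged reimplementation of what such a helper typically does (cast the
# floating-point tensor arguments to the autocast dtype, so the sparse op
# can then run with autocast disabled). Assumes torch >= 1.10 for
# get_autocast_gpu_dtype; DGL's actual helper may differ:
import torch as th

def cast_if_autocast_enabled_sketch(*args):
    if not th.is_autocast_enabled():
        return args
    dtype = th.get_autocast_gpu_dtype()
    return tuple(
        a.to(dtype) if th.is_tensor(a) and a.is_floating_point() else a
        for a in args
    )
# ----------------------------------------------------------------------------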
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________________ test_rgcn[8-idtype0] _____________________________ idtype = torch.int32, O = 8 @parametrize_idtype @pytest.mark.parametrize('O', [1, 8, 32]) def test_rgcn(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) B = 2 I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % B == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:389: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.5004, 0.5150, 0.0837, ..., -0.2301, -0.5909, -0.4255], [ 0.4280, 0.5575, 0.4563, ..., -0.318... 1.2758], [ 0.0272, 0.8308, -0.1578, ..., 0.8969, -0.2559, -0.7068]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________________ test_rgcn[8-idtype1] _____________________________ idtype = torch.int64, O = 8 @parametrize_idtype @pytest.mark.parametrize('O', [1, 8, 32]) def test_rgcn(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) B = 2 I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % B == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:389: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.4904, -0.4048, 0.1054, ..., 0.8263, -0.0182, -0.3286], [ 0.1295, 0.1673, 0.3674, ..., 0.168... 0.0509], [ 0.1634, -0.2912, -0.2321, ..., 0.4551, 0.7200, 0.0128]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________________ test_rgcn[32-idtype0] ____________________________ idtype = torch.int32, O = 32 @parametrize_idtype @pytest.mark.parametrize('O', [1, 8, 32]) def test_rgcn(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) B = 2 I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % B == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:389: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.5956, -1.5377, -0.9770, ..., -0.3251, 0.6088, -0.7434], [-0.5486, -0.7755, -0.4398, ..., -0.234... -0.4531], [-0.4102, -0.0849, 0.3616, ..., 0.0827, 0.4221, -0.3024]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________________ test_rgcn[32-idtype1] ____________________________ idtype = torch.int64, O = 32 @parametrize_idtype @pytest.mark.parametrize('O', [1, 8, 32]) def test_rgcn(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) B = 2 I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis", B).to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % B == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B).to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:389: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.5183, -0.7084, 0.7880, ..., 0.0194, 0.1953, -0.7940], [ 0.3973, -0.3417, 0.0471, ..., 0.442... 0.5010], [ 0.7327, -0.2675, -0.8149, ..., -0.9808, -0.4003, -0.1540]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_rgcn_default_nbasis[1-idtype0] _____________________ idtype = torch.int32, O = 1 @parametrize_idtype @pytest.mark.parametrize('O', [1, 10, 40]) def test_rgcn_default_nbasis(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis").to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % R == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd").to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:444: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 0.1932], [-0.4442], [ 1.2024], [-0.4945], [-0.8663], [ 0.6029], ... [ 0.0407], [ 0.0574], [ 0.3677], [ 0.0793], [ 0.3107]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_rgcn_default_nbasis[1-idtype1] _____________________ idtype = torch.int64, O = 1 @parametrize_idtype @pytest.mark.parametrize('O', [1, 10, 40]) def test_rgcn_default_nbasis(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis").to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % R == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd").to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:444: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 0.6487], [-0.3070], [-0.7505], [ 0.7301], [ 0.1716], [ 1.0842], ... [-0.6335], [-0.5028], [-0.4268], [-0.0053], [ 0.5330]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_rgcn_default_nbasis[10-idtype0] _____________________ idtype = torch.int32, O = 10 @parametrize_idtype @pytest.mark.parametrize('O', [1, 10, 40]) def test_rgcn_default_nbasis(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis").to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % R == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd").to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:444: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.8781, -0.8904, -0.8871, ..., 0.3654, 0.7610, 0.2420], [-0.8465, 0.0241, -0.5828, ..., -0.308... 0.2724], [-0.6384, -0.3284, 0.7624, ..., -1.0435, 0.3147, 0.2131]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_rgcn_default_nbasis[10-idtype1] _____________________ idtype = torch.int64, O = 10 @parametrize_idtype @pytest.mark.parametrize('O', [1, 10, 40]) def test_rgcn_default_nbasis(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis").to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % R == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd").to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:444: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-1.0661, 1.2825, -0.6266, ..., 0.3066, -0.9195, -0.7139], [ 0.5909, 0.1753, 0.1612, ..., -0.780... 0.0345], [-0.8277, 0.4179, -1.4531, ..., -1.2665, 1.1368, -0.1736]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_rgcn_default_nbasis[40-idtype0] _____________________ idtype = torch.int32, O = 40 @parametrize_idtype @pytest.mark.parametrize('O', [1, 10, 40]) def test_rgcn_default_nbasis(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis").to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % R == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd").to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:444: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 0.7001, -0.2555, 0.3475, ..., -0.1695, 0.9420, -0.3143], [-0.4761, 0.0753, 0.1837, ..., -0.200... 0.7230], [-1.5536, 0.9215, -0.9472, ..., 0.8587, -0.6231, 0.3237]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_rgcn_default_nbasis[40-idtype1] _____________________ idtype = torch.int64, O = 40 @parametrize_idtype @pytest.mark.parametrize('O', [1, 10, 40]) def test_rgcn_default_nbasis(idtype, O): ctx = F.ctx() etype = [] g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)) g = g.astype(idtype).to(F.ctx()) # 5 etypes R = 5 for i in range(g.number_of_edges()): etype.append(i % 5) I = 10 h = th.randn((100, I)).to(ctx) r = th.tensor(etype).to(ctx) norm = th.rand((g.number_of_edges(), 1)).to(ctx) sorted_r, idx = th.sort(r) sorted_g = dgl.reorder_graph(g, edge_permute_algo='custom', permute_config={'edges_perm' : idx.to(idtype)}) sorted_norm = norm[idx] rgc = nn.RelGraphConv(I, O, R).to(ctx) th.save(rgc, tmp_buffer) # test pickle rgc_basis = nn.RelGraphConv(I, O, R, "basis").to(ctx) th.save(rgc_basis, tmp_buffer) # test pickle if O % R == 0: rgc_bdd = nn.RelGraphConv(I, O, R, "bdd").to(ctx) th.save(rgc_bdd, tmp_buffer) # test pickle # basic usage > h_new = rgc(g, h, r) tests\pytorch\test_nn.py:444: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\relgraphconv.py:173: in forward g.update_all(self.message, fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 1.0039, -0.5955, -0.0490, ..., 0.8440, 0.4723, 0.3927], [-0.3716, -0.6820, -0.5605, ..., 0.048... 0.0618], [ 0.5527, -0.2110, 0.3849, ..., 0.3952, -0.8336, -0.1499]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
________________________ test_gat_conv[1-1-g0-idtype0] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int32, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[-0.1650]], [[-0.6172]], [[-0.1875]], [[ 0.6192]], [[ 0.3698]], ... [[ 0.8059]], [[-0.3322]], [[ 0.2771]], [[ 0.1082]]], grad_fn=)
rhs_data = tensor([[[ 0.0200]], [[ 0.0749]], [[ 0.0228]], [[-0.0751]], [[-0.0449]], ... [[-0.0978]], [[ 0.0403]], [[-0.0336]], [[-0.0131]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
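Each test_gat_conv traceback enters DGL through the same frame, graph.apply_edges(fn.u_add_v('el', 'er', 'e')) in gatconv.py:303, which computes one attention logit per edge as the sum of a source-node term and a destination-node term. A standalone reproduction of just that step (assuming a working DGL install; the tiny graph and feature names are illustrative):

    import torch
    import dgl
    import dgl.function as fn

    g = dgl.graph(([0, 1], [1, 2]))             # 3 nodes, 2 edges
    g.ndata['el'] = torch.randn(3, 1)           # per-source attention term
    g.ndata['er'] = torch.randn(3, 1)           # per-destination attention term
    g.apply_edges(fn.u_add_v('el', 'er', 'e'))  # e[i] = el[src(i)] + er[dst(i)]
    print(g.edata['e'].shape)                   # torch.Size([2, 1])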
________________________ test_gat_conv[1-1-g0-idtype1] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int64, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 2.4681]], [[-1.6144]], [[ 0.9395]], [[ 1.2816]], [[-0.3932]], ... [[ 0.1351]], [[-0.5771]], [[-2.7710]], [[ 1.8190]]], grad_fn=)
rhs_data = tensor([[[-5.9841]], [[ 3.9142]], [[-2.2780]], [[-3.1072]], [[ 0.9534]], ... [[-0.3275]], [[ 1.3992]], [[ 6.7184]], [[-4.4103]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-1-g1-idtype0] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 1), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 0.3166]], [[-2.2701]], [[-1.8705]], [[ 0.1864]], [[-0.7116]], ... [[-3.4579]], [[-0.0101]], [[-1.4921]], [[-2.8930]]], grad_fn=)
rhs_data = tensor([[[ 0.0792]], [[-0.5676]], [[-0.4677]], [[ 0.0466]], [[-0.1779]], ... [[-0.8646]], [[-0.0025]], [[-0.3731]], [[-0.7233]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-1-g1-idtype1] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 1), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 0.4859]], [[ 5.3679]], [[-1.4604]], [[-1.9768]], [[ 1.9826]], ... [[-4.8519]], [[-2.7468]], [[ 4.5642]], [[ 0.4012]]], grad_fn=)
rhs_data = tensor([[[-0.3106]], [[-3.4314]], [[ 0.9335]], [[ 1.2636]], [[-1.2674]], ... [[ 3.1015]], [[ 1.7559]], [[-2.9176]], [[-0.2565]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-1-g2-idtype0] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...r': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 0.4571]], [[ 0.1209]], [[-0.6261]], [[ 0.0500]], [[ 1.0809]], ... [[ 0.4801]], [[ 0.2702]], [[ 0.2558]], [[-0.2243]]], grad_fn=)
rhs_data = tensor([[[-2.2373]], [[-0.5918]], [[ 3.0648]], [[-0.2447]], [[-5.2909]], ... [[-2.3500]], [[-1.3227]], [[-1.2522]], [[ 1.0981]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-1-g2-idtype1] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...r': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 0.0070]], [[-0.5195]], [[ 1.7941]], [[-0.5468]], [[ 1.7042]], ... [[ 1.1612]], [[-1.2101]], [[-0.7298]], [[ 2.4521]]], grad_fn=)
rhs_data = tensor([[[-0.0245]], [[ 1.8113]], [[-6.2548]], [[ 1.9064]], [[-5.9413]], ... [[-4.0482]], [[ 4.2187]], [[ 2.5444]], [[-8.5487]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-1-g3-idtype0] ________________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'ft': Scheme(shape=(1, 1), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 0.8349]], [[ 1.4136]], [[ 0.7201]], [[ 0.8119]], [[-2.4114]], [[-1.8802]], [[ 0.1493]], [[-0.7703]], [[-0.6132]]], grad_fn=)
rhs_data = tensor([[[-0.9068]], [[-1.5353]], [[-0.7821]], [[-0.8818]], [[ 2.6191]], [[ 2.0421]], [[-0.1622]], [[ 0.8367]], [[ 0.6660]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-1-g3-idtype1] ________________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'ft': Scheme(shape=(1, 1), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 0.7742]], [[ 0.0970]], [[ 0.8368]], [[ 1.0546]], [[ 0.2960]], [[ 0.6417]], [[ 0.2710]], [[-0.0930]], [[-0.2901]]], grad_fn=)
rhs_data = tensor([[[-1.3556]], [[-0.1699]], [[-1.4652]], [[-1.8465]], [[-0.5183]], [[-1.1235]], [[-0.4745]], [[ 0.1628]], [[ 0.5079]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-1-g4-idtype0] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 1), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[-0.5293]], [[ 0.0380]], [[ 0.6663]], [[-0.7647]], [[ 0.4598]], ... [[-0.1009]], [[ 0.6384]], [[ 0.1764]], [[ 0.0025]]], grad_fn=)
rhs_data = tensor([[[ -7.3919]], [[ 0.5308]], [[ 9.3042]], [[-10.6788]], [[ 6.4209]], ... [[ -1.4087]], [[ 8.9148]], [[ 2.4640]], [[ 0.0354]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-1-g4-idtype1] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 1), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[-2.1451]], [[-2.8286]], [[ 5.6773]], [[ 0.3103]], [[ 7.2222]], ... [[-3.6402]], [[ 0.4824]], [[ 0.0498]], [[ 2.0053]]], grad_fn=)
rhs_data = tensor([[[-1.7711]], [[-2.3355]], [[ 4.6875]], [[ 0.2562]], [[ 5.9631]], ... [[-3.0055]], [[ 0.3983]], [[ 0.0411]], [[ 1.6557]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-1-g5-idtype0] ________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32
out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[-0.6303]], [[-0.3682]], [[ 0.4352]], [[ 0.5459]], [[-1.2812]], [[ 0.6484]]], grad_fn=)
rhs_data = tensor([[[-0.9593]], [[-0.5604]], [[ 0.6624]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
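The gsddmm body shown in each failure canonicalizes its operator before dispatch: 'sub' becomes 'add' with a negated right operand, and 'div' becomes 'mul' with its reciprocal, so the backend only ever sees the add/mul/copy family before the failing autocast call. The rewrite is plain tensor algebra and can be checked in isolation:

    import torch

    lhs = torch.tensor([8.0, 8.0])
    rhs = torch.tensor([2.0, 4.0])
    # 'sub' lowered to 'add' with a negated rhs, as in gsddmm/gspmm above
    assert torch.equal(lhs - rhs, lhs + (-rhs))
    # 'div' lowered to 'mul' with the reciprocal (rhs_data = 1. / rhs_data)
    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))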
________________________ test_gat_conv[1-1-g5-idtype1] ________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64
out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 2.5130]], [[ 0.3896]], [[ 2.3373]], [[ 3.8646]], [[-0.9286]], [[-0.0705]]], grad_fn=)
rhs_data = tensor([[[-4.2474]], [[-0.6584]], [[-3.9504]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-1-g6-idtype0] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 1), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 0.0098]], [[ 0.1097]], [[ 0.0095]], [[-0.0190]], [[ 0.0505]], ... [[-0.0564]], [[-0.0897]], [[ 0.1960]], [[-0.1524]]], grad_fn=)
rhs_data = tensor([[[-0.1806]], [[-2.0224]], [[-0.1748]], [[ 0.3509]], [[-0.9307]], ... [[ 1.0401]], [[ 1.6534]], [[-3.6147]], [[ 2.8102]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-1-g6-idtype1] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 1), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 1.9403]], [[ 1.9842]], [[ 0.4303]], [[ 1.8369]], [[-1.9152]], ... [[ 0.2403]], [[-3.4911]], [[-0.6532]], [[-7.9902]]], grad_fn=)
rhs_data = tensor([[[-0.5821]], [[-0.5952]], [[-0.1291]], [[-0.5510]], [[ 0.5745]], ... [[-0.0721]], [[ 1.0473]], [[ 0.1960]], [[ 2.3969]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g0-idtype0] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int32, out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 2.0756]], [[-2.1026]], [[-1.7871]], [[ 3.6761]], [[ 1.7978]], ... [[ 1.4999]], [[-2.1914]], [[ 4.5788]], [[-2.2304]]], grad_fn=)
rhs_data = tensor([[[ 0.3871]], [[ 0.0901]], [[-0.6526]], [[ 2.5571]], [[ 0.5008]], ... [[-0.3593]], [[-0.1909]], [[ 1.0692]], [[-0.0610]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g0-idtype1] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int64, out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[-1.1076]], [[ 1.5069]], [[ 0.2092]], [[ 1.3420]], [[ 0.0207]], ... [[-0.9363]], [[ 1.0478]], [[ 0.7604]], [[-1.2209]]], grad_fn=)
rhs_data = tensor([[[ 0.5252]], [[-0.8738]], [[ 1.1782]], [[ 0.2439]], [[-0.0819]], ... [[ 0.6176]], [[-0.7654]], [[-0.5823]], [[ 0.3991]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g1-idtype0] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 5), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[-1.5317]], [[ 0.1660]], [[ 2.0731]], [[ 1.1465]], [[-0.6028]], ... [[ 0.7517]], [[-0.4361]], [[ 0.6501]], [[ 0.3267]]], grad_fn=)
rhs_data = tensor([[[ 6.8904]], [[ 2.1512]], [[-4.8666]], [[-4.2607]], [[ 3.7643]], ... [[-2.1651]], [[ 3.7591]], [[-3.2777]], [[-1.9009]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g1-idtype1] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 5), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 0.1592]], [[-1.1662]], [[-2.7068]], [[-1.1107]], [[ 2.2456]], ... [[-3.4301]], [[ 0.5366]], [[-0.6935]], [[ 0.4184]]], grad_fn=)
rhs_data = tensor([[[ 2.1154]], [[-1.5085]], [[-4.2839]], [[-7.9195]], [[ 3.5641]], ... [[-5.3141]], [[-0.4902]], [[ 0.7335]], [[ 3.3115]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g2-idtype0] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...r': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32, out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 1.1522]], [[-0.3416]], [[ 0.1758]], [[-0.6830]], [[ 0.3654]], ... [[ 0.1315]], [[-0.8625]], [[-0.2937]], [[ 0.5077]]], grad_fn=)
rhs_data = tensor([[[ 0.6708]], [[-0.0107]], [[-0.2822]], [[ 0.3850]], [[ 0.6594]], ... [[-1.2101]], [[ 0.0383]], [[-0.5565]], [[-0.5272]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g2-idtype1] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...r': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64, out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[-0.1790]], [[-0.7814]], [[ 4.9874]], [[-0.6802]], [[-2.2669]], ... [[ 1.8695]], [[-0.0702]], [[-0.5041]], [[ 1.3621]]], grad_fn=)
rhs_data = tensor([[[-1.8432]], [[ 0.8166]], [[ 1.9445]], [[ 0.6126]], [[ 0.3049]], ... [[-1.2361]], [[-0.4389]], [[-0.8158]], [[ 1.4580]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g3-idtype0] ________________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'ft': Scheme(shape=(1, 5), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 0.4389]], [[-7.3465]], [[ 4.4955]], [[-5.5898]], [[-1.6659]], [[-0.2334]], [[-4.2718]], [[-2.1993]], [[ 1.3735]]], grad_fn=)
rhs_data = tensor([[[ 0.0936]], [[-2.3284]], [[ 1.2039]], [[-4.2287]], [[ 0.5842]], [[ 1.4835]], [[-1.4408]], [[-0.8756]], [[ 2.0756]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g3-idtype1] ________________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'ft': Scheme(shape=(1, 5), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[-0.2230]], [[-0.1510]], [[-0.1680]], [[-0.4649]], [[ 0.5044]], [[-0.2980]], [[ 0.5686]], [[ 0.2636]], [[ 0.2717]]], grad_fn=)
rhs_data = tensor([[[-0.9247]], [[-0.1859]], [[ 0.0740]], [[ 0.5798]], [[-0.1194]], [[ 0.3852]], [[-0.1186]], [[-0.2958]], [[-0.2655]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g4-idtype0] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 5), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 0.6896]], [[-1.1888]], [[-1.2367]], [[-0.2142]], [[ 1.8823]], ... [[ 2.8191]], [[-2.4613]], [[ 1.8531]], [[ 1.8106]]], grad_fn=)
rhs_data = tensor([[[ 0.3881]], [[ 0.2867]], [[-0.0917]], [[-0.3590]], [[-1.1796]], ... [[-1.1787]], [[ 1.2113]], [[ 0.2954]], [[-1.2517]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g4-idtype1] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 5), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 5.3988]], [[ 4.1892]], [[ 1.7641]], [[-1.7360]], [[-7.1028]], ... [[-1.3296]], [[ 3.9674]], [[ 1.4202]], [[-3.7856]]], grad_fn=)
rhs_data = tensor([[[ 0.9456]], [[-0.2282]], [[-2.2591]], [[ 2.3813]], [[-0.0644]], ... [[-2.0209]], [[ 1.6289]], [[ 0.1251]], [[ 0.7595]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g5-idtype0] ________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32
out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[ 0.9991]], [[ 0.7176]], [[-1.6138]], [[ 1.5913]], [[ 0.1723]], [[ 0.2503]]], grad_fn=)
rhs_data = tensor([[[ 5.1101]], [[-2.5817]], [[-2.6035]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
________________________ test_gat_conv[1-5-g5-idtype1] ________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64
out_dim = 5, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gat_conv(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATConv(5, out_dim, num_heads)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:480:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[-1.6483]], [[-2.5216]], [[-1.3903]], [[-1.7610]], [[-1.6450]], [[-2.9325]]], grad_fn=)
rhs_data = tensor([[[ 0.2066]], [[-0.4937]], [[ 1.7869]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[1-5-g6-idtype0] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 5), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.3829]], [[ 1.2021]], [[-0.9324]], [[-1.0357]], [[ 0.3120]], ... [[ 2.0654]], [[-0.1161]], [[-0.1563]], [[ 0.7114]]], grad_fn=) rhs_data = tensor([[[-1.5390]], [[-0.3047]], [[ 0.5342]], [[-1.9946]], [[-1.3498]], ... [[ 2.4686]], [[-0.0199]], [[ 0.6054]], [[ 0.8698]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[1-5-g6-idtype1] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(1, 5), dtype=torch.float32), 'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-1.3547]], [[ 3.5401]], [[-5.2967]], [[-3.1144]], [[-0.1683]], ... [[ 4.8277]], [[-1.5376]], [[ 3.4097]], [[-1.4697]]], grad_fn=) rhs_data = tensor([[[-0.6725]], [[-3.3961]], [[ 6.5490]], [[ 4.5219]], [[-1.6624]], ... [[-3.8969]], [[ 2.6684]], [[-3.0123]], [[ 1.3172]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g0-idtype0] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-1.8391], [-3.5985], [-0.0669], [-0.3181]], [[-0.2745], [ 2.097...346]], [[-1.1638], [-3.4928], [ 0.0262], [-0.1353]]], grad_fn=) rhs_data = tensor([[[-1.4685], [ 3.1456], [-0.2243], [-0.1951]], [[-0.2192], [-1.833...665]], [[-0.9293], [ 3.0533], [ 0.0877], [-0.0830]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g0-idtype1] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 4.2121e-03], [ 1.1274e-02], [-7.4956e-01], [ 3.5079e-03]], [[ 2.9449e-0...[[-1.3677e-02], [ 1.0243e-01], [-1.4760e-01], [-3.4140e-01]]], grad_fn=) rhs_data = tensor([[[ 2.0521e-01], [-6.0208e-03], [ 8.1934e-01], [-1.2023e-04]], [[ 1.4347e+0...[[-6.6632e-01], [-5.4703e-02], [ 1.6135e-01], [ 1.1702e-02]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g1-idtype0] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(4, 1), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.0731], [ 0.0368], [ 0.1330], [ 0.7415]], [[ 0.2326], [ 0.123...379]], [[-0.2907], [ 0.2626], [ 0.0558], [ 0.9798]]], grad_fn=) rhs_data = tensor([[[ 0.0448], [ 0.1246], [ 0.2100], [ 0.6173]], [[ 0.1424], [ 0.417...967]], [[-0.1780], [ 0.8899], [ 0.0881], [ 0.8158]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g1-idtype1] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(4, 1), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.2194], [-0.4381], [ 1.5392], [-0.1517]], [[-0.0310], [ 4.048...580]], [[ 0.1122], [-0.8929], [ 1.2413], [-0.2934]]], grad_fn=) rhs_data = tensor([[[-0.2222], [-0.3361], [-1.2055], [-1.7196]], [[-0.0313], [ 3.106...904]], [[ 0.1136], [-0.6851], [-0.9721], [-3.3247]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g2-idtype0] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...r': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.2129], [-0.1651], [ 0.3476], [-0.5390]], [[ 1.1404], [-0.342...130]], [[-0.7229], [-0.0210], [ 0.5719], [ 0.8942]]], grad_fn=) rhs_data = tensor([[[ 6.2385e-03], [ 7.9481e-02], [-5.4620e-01], [ 1.7512e+00]], [[ 3.3411e-0...[[-2.1181e-02], [ 1.0127e-02], [-8.9883e-01], [-2.9054e+00]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g2-idtype1] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...r': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.1612], [-0.6846], [ 0.1214], [-0.1106]], [[-1.6178], [-0.275...878]], [[-1.2491], [ 0.4058], [-0.6443], [ 0.1180]]], grad_fn=) rhs_data = tensor([[[ 0.1458], [ 1.6086], [-0.0196], [-0.2803]], [[-1.4632], [ 0.648...226]], [[-1.1298], [-0.9535], [ 0.1042], [ 0.2991]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g3-idtype0] ________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'ft': Scheme(shape=(4, 1), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.8408], [ 0.9831], [ 0.5273], [-0.2994]], [[ 0.4827], [-1.089...334]], [[ 1.8180], [ 0.5948], [ 0.5420], [ 0.2276]]], grad_fn=) rhs_data = tensor([[[-0.4165], [-0.7754], [-0.7050], [-1.0075]], [[-0.2391], [ 0.859...123]], [[-0.9005], [-0.4691], [-0.7247], [ 0.7660]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g3-idtype1] ________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'ft': Scheme(shape=(4, 1), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 3.7230e-01], [ 1.5135e+00], [ 8.8428e-01], [-5.5128e-02]], [[-5.9381e-0...[[ 4.9533e-01], [ 9.3543e-01], [-2.7316e-01], [ 9.1078e-03]]], grad_fn=) rhs_data = tensor([[[-0.2332], [ 1.5187], [-0.9573], [-0.2131]], [[ 0.3720], [-1.269...134]], [[-0.3103], [ 0.9386], [ 0.2957], [ 0.0352]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g4-idtype0] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(4, 1), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.0263], [ 0.3737], [ 0.1586], [ 0.0754]], [[ 0.0586], [-0.173...732]], [[-0.0661], [-0.2219], [ 0.0486], [-0.0187]]], grad_fn=) rhs_data = tensor([[[ 0.0509], [-0.7647], [-0.8217], [-1.6832]], [[ 0.1132], [ 0.355...344]], [[-0.1277], [ 0.4542], [-0.2520], [ 0.4168]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g4-idtype1] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(4, 1), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-3.1385e+00], [ 4.6503e-01], [ 2.0344e-01], [ 4.1876e-02]], [[ 1.8386e-0...[[-3.4285e+00], [ 7.6762e-02], [ 3.6119e-01], [ 5.1487e+00]]], grad_fn=) rhs_data = tensor([[[-2.6840e-02], [ 3.5388e-01], [ 3.0025e+00], [-2.6629e-02]], [[ 1.5724e-0...[[-2.9320e-02], [ 5.8415e-02], [ 5.3307e+00], [-3.2740e+00]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g5-idtype0] ________________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 4.5034], [ 0.2751], [-1.3171], [ 0.7076]], [[ 0.1624], [ 0.993...406]], [[-1.5987], [-2.0014], [ 1.2782], [-0.5551]]], grad_fn=) rhs_data = tensor([[[ 6.4658], [-0.2133], [-0.8975], [-4.6700]], [[ 0.2331], [-0.770...106]], [[ 3.2671], [ 0.5872], [-0.5402], [-2.5438]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g5-idtype1] ________________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.1368], [-0.4337], [ 0.5140], [-2.4391]], [[-1.5370], [ 1.569...015]], [[-0.2784], [-1.1632], [ 0.9167], [ 4.2419]]], grad_fn=) rhs_data = tensor([[[-0.4961], [ 0.0128], [ 0.0567], [ 0.7522]], [[ 5.5758], [-0.046...344]], [[-2.8843], [-0.0208], [-0.0876], [ 0.1101]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g6-idtype0] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(4, 1), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.0218], [ 0.5992], [ 0.6557], [ 0.1496]], [[-0.9409], [-0.168...959]], [[-0.0408], [ 0.2047], [ 0.2937], [ 0.0450]]], grad_fn=) rhs_data = tensor([[[ 8.2178e-02], [ 3.6789e+00], [ 2.0945e+00], [ 2.1408e-02]], [[-3.5414e+0...[[-1.5367e-01], [ 1.2570e+00], [ 9.3822e-01], [ 6.4324e-03]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-1-g6-idtype1] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(4, 1), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 2.0087e-01], [-4.6334e-01], [-7.3687e-02], [ 1.6846e+00]], [[ 3.4328e-0...[[ 1.9192e-01], [-6.4101e-01], [-4.8252e-02], [ 1.6413e-01]]], grad_fn=) rhs_data = tensor([[[ 0.1795], [-0.2064], [ 1.5393], [ 1.4017]], [[ 0.3068], [-0.025...926]], [[ 0.1715], [-0.2855], [ 1.0079], [ 0.1366]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-5-g0-idtype0] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.0133], [ 0.1136], [-0.1534], [-0.5244]], [[ 0.0412], [ 0.138...536]], [[-0.0864], [ 0.2272], [ 0.2013], [ 0.1085]]], grad_fn=) rhs_data = tensor([[[-0.1387], [-1.3265], [-0.4190], [ 0.4093]], [[ 0.3888], [-0.678...647]], [[ 0.5214], [-0.4421], [ 0.7064], [ 0.2876]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-5-g0-idtype1] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-3.2400e-01], [-3.3318e-01], [ 7.7738e-02], [-3.8318e-01]], [[ 1.4592e+0...[[ 1.3317e+00], [ 9.8326e-01], [-9.5699e-01], [ 3.7058e-01]]], grad_fn=) rhs_data = tensor([[[-0.3254], [-0.7533], [-0.1201], [ 0.4705]], [[ 1.0818], [ 1.296...022]], [[ 0.1501], [ 0.5917], [ 0.3687], [-0.4503]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-5-g1-idtype0] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(4, 5), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-1.0578e+00], [-8.0735e-01], [-3.0872e-01], [-3.8922e-01]], [[ 4.8907e-0...[[-2.5894e+00], [-4.7954e-01], [-1.1894e+00], [-1.8314e-01]]], grad_fn=) rhs_data = tensor([[[-0.0611], [ 1.5512], [ 0.3311], [-0.0716]], [[ 0.3535], [-0.301...298]], [[-0.5186], [ 1.9115], [ 0.6141], [ 0.1359]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-5-g1-idtype1] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'ft': Scheme(shape=(4, 5), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.6509], [-0.3179], [ 0.5977], [ 0.3514]], [[-0.5883], [-0.156...830]], [[-0.1152], [ 1.5886], [ 0.4192], [ 0.3899]]], grad_fn=) rhs_data = tensor([[[ 1.2406], [ 1.1054], [ 0.3353], [ 1.1703]], [[ 0.0576], [-0.414...642]], [[ 0.7881], [ 0.0257], [ 0.4227], [ 2.0394]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-5-g2-idtype0] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...r': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 2.7734], [ 2.3463], [ 0.3434], [-0.3500]], [[ 0.8232], [-0.377...111]], [[ 1.1143], [ 0.6319], [ 0.4170], [ 0.2499]]], grad_fn=) rhs_data = tensor([[[ 0.4398], [ 0.9114], [ 0.1064], [ 1.5394]], [[ 0.1279], [ 0.160...280]], [[-0.3495], [ 1.0082], [-0.2303], [-0.4494]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-5-g2-idtype1] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...r': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 1.3400], [-0.2702], [ 0.2603], [-0.1598]], [[-0.7320], [-0.729...275]], [[ 0.4009], [-1.3191], [-0.7519], [ 0.5272]]], grad_fn=) rhs_data = tensor([[[ 1.4410], [ 0.3191], [-1.6505], [ 0.5480]], [[-1.5270], [-0.229...487]], [[-0.0774], [-1.5952], [ 0.5087], [ 1.1512]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-5-g3-idtype0] ________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'ft': Scheme(shape=(4, 5), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.7510], [ 0.4597], [ 1.4093], [ 0.4462]], [[-0.0161], [-0.631...693]], [[ 0.3405], [-0.5441], [-0.1689], [-0.7062]]], grad_fn=) rhs_data = tensor([[[-0.3594], [ 0.8532], [-0.6392], [ 0.3471]], [[-0.3738], [-0.155...196]], [[ 0.0779], [-0.1490], [ 0.4963], [-0.0118]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_gat_conv[4-5-g3-idtype1] ________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'ft': Scheme(shape=(4, 5), dtype=torch.float32), 'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatconv.py:303: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 7.2095e-02], [-3.4801e-01], [-6.3903e-01], [ 3.2008e-01]], [[-2.0732e-0...[[-3.5903e-01], [ 1.6846e-01], [-8.8319e-01], [-1.0961e+00]]], grad_fn=) rhs_data = tensor([[[-0.2491], [ 0.4868], [-0.4591], [ 0.5683]], [[ 0.0620], [-0.061...696]], [[-1.2519], [-0.0431], [-0.0397], [-0.9564]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The same TypeError is raised at python\dgl\backend\pytorch\sparse.py:731 by
every remaining test_gat_conv and test_gat_conv_bi parametrization; only the
test id and the randomly generated feature values differ. test_gat_conv fails
at tests\pytorch\test_nn.py:480 and test_gat_conv_bi at tests\pytorch\test_nn.py:504,
both inside h = gat(g, feat); test_gat_conv_bi draws g from
get_cases(['bipartite'], exclude=['zero-degree']) with out_dim in [1, 2] and
passes feat as a (src, dst) tuple.

tests\pytorch\test_nn.py::test_gat_conv[4-5-g4-idtype0]
tests\pytorch\test_nn.py::test_gat_conv[4-5-g4-idtype1]
tests\pytorch\test_nn.py::test_gat_conv[4-5-g5-idtype0]
tests\pytorch\test_nn.py::test_gat_conv[4-5-g5-idtype1]
tests\pytorch\test_nn.py::test_gat_conv[4-5-g6-idtype0]
tests\pytorch\test_nn.py::test_gat_conv[4-5-g6-idtype1]
tests\pytorch\test_nn.py::test_gat_conv_bi[1-1-g0-idtype0]
tests\pytorch\test_nn.py::test_gat_conv_bi[1-1-g0-idtype1]
tests\pytorch\test_nn.py::test_gat_conv_bi[1-1-g1-idtype0]
tests\pytorch\test_nn.py::test_gat_conv_bi[1-1-g1-idtype1]
tests\pytorch\test_nn.py::test_gat_conv_bi[1-2-g0-idtype0]
tests\pytorch\test_nn.py::test_gat_conv_bi[1-2-g0-idtype1]
tests\pytorch\test_nn.py::test_gat_conv_bi[1-2-g1-idtype0]
tests\pytorch\test_nn.py::test_gat_conv_bi[1-2-g1-idtype1]
tests\pytorch\test_nn.py::test_gat_conv_bi[4-1-g0-idtype0]
tests\pytorch\test_nn.py::test_gat_conv_bi[4-1-g0-idtype1]
tests\pytorch\test_nn.py::test_gat_conv_bi[4-1-g1-idtype0]
tests\pytorch\test_nn.py::test_gat_conv_bi[4-1-g1-idtype1]
tests\pytorch\test_nn.py::test_gat_conv_bi[4-2-g0-idtype0]
tests\pytorch\test_nn.py::test_gat_conv_bi[4-2-g0-idtype1]
tests\pytorch\test_nn.py::test_gat_conv_bi[4-2-g1-idtype0]
tests\pytorch\test_nn.py::test_gat_conv_bi[4-2-g1-idtype1]
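The gsddmm frame repeated in these traces canonicalizes the operator before
dispatch: 'sub' is rewritten as 'add' with a negated right operand, and 'div'
as 'mul' with the reciprocal, so the backend only needs add/mul kernels. The
identities this relies on are easy to check with plain tensors (the values
below are invented for illustration):

    import torch

    lhs = torch.tensor([8.0, 8.0])
    rhs = torch.tensor([2.0, 4.0])

    # The two rewrites at the top of the gsddmm body shown in the traceback.
    assert torch.allclose(lhs - rhs, lhs + (-rhs))        # sub -> add
    assert torch.allclose(lhs / rhs, lhs * (1.0 / rhs))   # div -> mul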
The test_gatv2_conv parametrizations fail through the same call chain:
GATv2Conv's forward also calls graph.apply_edges(fn.u_add_v('el', 'er', 'e')),
at python\dgl\nn\pytorch\conv\gatv2conv.py:288, entered from h = gat(g, feat)
at tests\pytorch\test_nn.py:519.

tests\pytorch\test_nn.py::test_gatv2_conv[1-1-g0-idtype0]
tests\pytorch\test_nn.py::test_gatv2_conv[1-1-g0-idtype1]
tests\pytorch\test_nn.py::test_gatv2_conv[1-1-g1-idtype0]
tests\pytorch\test_nn.py::test_gatv2_conv[1-1-g1-idtype1]
tests\pytorch\test_nn.py::test_gatv2_conv[1-1-g2-idtype0]
tests\pytorch\test_nn.py::test_gatv2_conv[1-1-g2-idtype1]
tests\pytorch\test_nn.py::test_gatv2_conv[1-1-g3-idtype0]
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-1-g3-idtype1] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-1.5605]], [[-1.6850]], [[ 0.5212]], [[-1.2256]], [[ 0.3028]], [[-0.4965]], [[-2.2771]], [[ 0.1697]], [[-1.2633]]], grad_fn=) rhs_data = tensor([[[-1.1682]], [[-1.1677]], [[ 0.3886]], [[-0.6189]], [[ 1.6004]], [[ 0.8832]], [[-1.8164]], [[-0.3867]], [[-2.3792]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-1-g4-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 3.2027]], [[ 2.5726]], [[-4.1354]], [[-0.2476]], [[-0.5240]], ...838]], [[-3.5967]], [[ 1.1413]], [[ 1.1737]], [[-1.7776]]], grad_fn=) rhs_data = tensor([[[-0.1346]], [[-1.6616]], [[ 2.6730]], [[ 0.5437]], [[ 1.1903]], ...775]], [[ 0.3275]], [[-1.1535]], [[-0.4655]], [[ 0.5846]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-1-g4-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.2378]], [[-1.9865]], [[-2.2731]], [[-3.6583]], [[-1.2145]], ...959]], [[-4.0923]], [[-2.0996]], [[ 1.1122]], [[ 0.7369]]], grad_fn=) rhs_data = tensor([[[-0.3954]], [[ 0.2837]], [[ 1.8011]], [[ 0.8587]], [[ 1.4266]], ...033]], [[ 2.4676]], [[ 1.3969]], [[-0.2243]], [[-1.2974]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-1-g5-idtype0] _______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.0665]], [[-0.2400]], [[-1.8651]], [[ 1.0007]], [[ 1.2387]], [[ 1.3013]]], grad_fn=) rhs_data = tensor([[[-0.0665]], [[-0.2400]], [[-1.8651]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-1-g5-idtype1] _______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-1.8393]], [[ 1.9085]], [[-0.2135]], [[-1.7120]], [[-0.7175]], [[-0.2323]]], grad_fn=) rhs_data = tensor([[[-1.8393]], [[ 1.9085]], [[-0.2135]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-1-g6-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.6823]], [[-2.3584]], [[ 0.0413]], [[ 0.8772]], [[ 2.3237]], ...211]], [[-0.6354]], [[-0.4140]], [[ 0.0647]], [[-1.5931]]], grad_fn=) rhs_data = tensor([[[ 0.2263]], [[-3.5048]], [[-0.2677]], [[ 0.4227]], [[ 0.3877]], ...524]], [[-0.5669]], [[-2.2883]], [[ 1.2446]], [[ 0.2460]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-1-g6-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(1, 1), dtype=torch.float32), 'er': Scheme(shape=(1, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-3.0107]], [[ 1.2382]], [[ 0.4750]], [[ 1.7366]], [[-0.8193]], ...037]], [[-1.4020]], [[-0.3603]], [[ 3.6699]], [[ 3.9827]]], grad_fn=) rhs_data = tensor([[[-3.4804]], [[-0.4598]], [[ 0.8298]], [[ 1.1570]], [[-0.2940]], ...298]], [[-0.4173]], [[-1.6607]], [[ 3.3611]], [[ 2.1099]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'el': Scheme(shap...), 'er': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 1.5148e+00, 2.5888e+00, -2.8531e+00, -2.1871e+00, 3.9441e+00]], [[-9.1285e-01, -7.4840e-01, 5.3...00e-01]], [[ 6.1948e-01, -5.0324e-02, -9.4104e-01, -1.0029e-01, 1.5055e+00]]], grad_fn=) rhs_data = tensor([[[ 2.5246, -2.2849, 1.5732, -0.0083, -0.1820]], [[-1.4085, 1.1460, -1.5986, 0.0417, 0.4138]], ... -0.0667, -0.7912, 0.1692]], [[-0.1236, 0.8247, -1.3329, -0.6027, -0.2373]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'el': Scheme(shap...), 'er': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.5727, 0.4000, 0.8717, -0.4968, -1.4469]], [[ 0.4046, -0.2565, -0.4604, -1.2137, -1.3510]], ... -0.1572, 0.3949, 0.0574]], [[ 0.2527, 0.3987, -0.5381, -0.5574, -0.0133]]], grad_fn=) rhs_data = tensor([[[-1.7061, 0.7093, -0.2583, -0.5549, 0.7747]], [[ 0.6093, 0.3031, -0.1591, -0.0443, -0.2775]], ... -1.5955, -0.2667, 0.6728]], [[ 0.5848, 0.0063, 0.2395, 0.3745, -0.7827]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g1-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(1, 5), dtype=torch.float32), 'er': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 1.2381e-01, -1.1706e-01, 1.2634e+00, 3.9159e-01, -1.4498e+00]], [[-4.5678e-01, -4.4236e-01, 1.3...92e-01]], [[-1.0501e+00, 2.6102e-01, 1.0067e+00, 4.1708e-01, -1.8376e-01]]], grad_fn=) rhs_data = tensor([[[ 0.5722, -0.5137, 0.7577, -1.2610, 0.0872]], [[ 0.7465, -0.4289, -0.7702, 0.2183, 0.6519]], ... 0.7434, -0.2745, -0.6597]], [[ 0.2686, -0.7324, 0.1841, 0.3042, 0.6393]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g1-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(1, 5), dtype=torch.float32), 'er': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.7329, 0.1476, 0.3643, 0.8091, 0.5560]], [[-1.3230, 0.8121, -1.4328, -0.0263, -0.4984]], ... -0.6759, -0.6629, 0.4691]], [[ 0.2818, -1.0143, 1.2156, -3.1878, 0.8440]]], grad_fn=) rhs_data = tensor([[[-0.7300, -1.6988, -1.9720, 0.7772, -0.7080]], [[ 1.2889, 2.0599, 1.7396, -1.9057, 0.4122]], ... -0.8099, -1.0399, -1.4252]], [[-2.8105, -2.4611, -1.4127, -0.5273, -3.0439]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g2-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'el': Scheme(shap...r': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.6291, 0.8548, -2.4163, 1.3109, -0.6682]], [[ 3.5862, -1.2499, -0.5756, -1.5179, -1.7485]], ... 2.9952, -1.8093, 0.4392]], [[-0.4763, 0.5867, 0.6701, -0.4600, -0.4346]]], grad_fn=) rhs_data = tensor([[[ 1.1074, 0.5343, -0.1144, -1.0100, 0.5554]], [[-4.0283, 0.2205, -2.8170, 1.4045, -1.3161]], ... 1.7036, -0.8401, 0.8875]], [[ 0.1147, -0.8348, 0.7037, -0.7234, 1.0058]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g2-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'el': Scheme(shap...r': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 1.4268, -2.9529, 0.5167, 0.5541, -2.7901]], [[ 0.4666, 2.8637, 0.1399, -0.7225, -0.2497]], ... -1.0349, 1.5425, -1.6701]], [[ 0.2791, 0.7481, -1.4440, -0.2677, 0.7414]]], grad_fn=) rhs_data = tensor([[[-1.2443, 5.9451, 0.9736, 2.8319, -0.8730]], [[ 0.9384, -1.8799, -1.1468, -0.7729, -0.2988]], ... 2.2856, 2.1638, 0.3114]], [[ 0.1327, -3.6788, 0.0395, -0.4713, -0.1138]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g3-idtype0] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'el': Scheme(shape=(1, 5), dtype=torch.float32), 'er': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.7852, -0.7929, 0.0372, 0.3858, -0.7274]], [[-1.8366, -0.8200, -2.1524, -1.1223, -0.6887]], ... 0.8607, 0.7988, 1.3602]], [[ 1.4845, 1.4996, 1.1831, 1.7277, 1.5745]]], grad_fn=) rhs_data = tensor([[[ 0.5907, -1.2105, 0.0297, -0.2240, -0.7962]], [[ 2.2359, -0.9497, -0.7823, -2.4867, 0.1249]], ... 1.6151, 3.1935, 0.4308]], [[-2.6489, 1.4154, 1.2612, 3.2913, -0.1927]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g3-idtype1] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'el': Scheme(shape=(1, 5), dtype=torch.float32), 'er': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 5.9275e-01, -1.2774e+00, -7.4747e-02, 1.8935e+00, -3.9118e-01]], [[ 2.0656e+00, -1.8989e+00, -5.2...87e-01]], [[ 1.2896e+00, -2.3212e+00, 3.2567e-04, -2.6042e+00, 6.0513e-01]]], grad_fn=) rhs_data = tensor([[[-2.1752e-02, -5.6488e-01, -1.2923e+00, -8.7637e-02, -8.0199e-01]], [[ 4.8230e-01, -4.8182e-01, 1.7...60e-01]], [[-2.6137e-01, 2.5633e-01, 2.7876e+00, 1.8108e+00, -5.3170e-01]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g4-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(1, 5), dtype=torch.float32), 'er': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.9970, 0.0828, 0.9195, -0.7405, -0.4092]], [[-0.6179, 0.3945, 0.5464, 1.8047, -1.2336]], ... -1.1554, -2.2575, 2.1639]], [[ 1.1258, 1.2439, 3.2631, -2.1868, 0.8612]]], grad_fn=) rhs_data = tensor([[[-0.0748, -0.5715, 0.0551, -1.0049, -0.0556]], [[ 0.5954, -0.7703, 3.5665, -0.2745, 1.4391]], ... 0.3603, 1.3518, -1.1579]], [[-1.0414, 0.4542, -1.2743, -1.4739, -1.1903]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g4-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(1, 5), dtype=torch.float32), 'er': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-1.5380, -0.9819, 0.2317, -1.3171, 1.3701]], [[ 0.2318, 0.7418, 0.7520, 1.1458, 0.1644]], ... -0.3337, 1.8191, -0.5714]], [[ 0.7866, 0.5468, -0.2027, 1.2150, -0.2311]]], grad_fn=) rhs_data = tensor([[[ 1.4102e+00, -5.4912e-01, -1.0784e+00, -1.7385e+00, 1.1818e+00]], [[-2.6268e-02, -5.8894e-01, 2.8...68e+00]], [[-3.4879e-01, 1.7133e-01, 9.1761e-01, -2.0260e-01, -1.6320e-01]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g5-idtype0] _______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.2705, 1.9891, 0.6123, 1.1502, -1.0621]], [[-0.8657, -1.5141, 0.6779, 1.6001, -1.9514]], ... 1.3996, -1.3605, -0.2892]], [[ 1.4117, -1.4259, 1.1109, -0.7142, 0.9708]]], grad_fn=) rhs_data = tensor([[[ 0.2705, 1.9891, 0.6123, 1.1502, -1.0621]], [[-0.8657, -1.5141, 0.6779, 1.6001, -1.9514]], [[-0.6522, 1.4272, -0.0565, 1.0412, -0.3532]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g5-idtype1] _______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.8379, -0.5576, -2.4463, -0.0118, -0.1861]], [[ 1.4168, -0.7050, 2.4124, -1.1660, -0.7984]], ... -2.3412, 1.3344, 0.1921]], [[ 0.0425, 1.3176, -1.1753, 1.2575, -0.6275]]], grad_fn=) rhs_data = tensor([[[-0.8379, -0.5576, -2.4463, -0.0118, -0.1861]], [[ 1.4168, -0.7050, 2.4124, -1.1660, -0.7984]], [[ 0.1903, 0.7414, 1.0749, 0.2642, 1.8135]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g6-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(1, 5), dtype=torch.float32), 'er': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.7497, -0.1481, -0.8256, -0.1280, 0.8366]], [[-0.4532, -0.3371, -0.1108, -0.1293, 0.4619]], ... 0.4651, 1.1399, -1.3687]], [[-1.0048, -0.1007, -0.9975, 0.1151, 0.4724]]], grad_fn=) rhs_data = tensor([[[-0.5395, -0.4127, 0.6390, 0.8460, 0.6513]], [[ 0.2591, -0.4340, 0.3210, -0.7326, 0.9633]], ... -0.7714, -0.2703, -0.0835]], [[-0.1452, -0.6599, 0.6557, 0.2352, 1.1718]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[1-5-g6-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(1, 5), dtype=torch.float32), 'er': Scheme(shape=(1, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 2.1320, -1.8957, 1.0553, -0.0278, 1.5026]], [[ 3.2985, 0.4263, 0.5315, -0.7771, 0.7964]], ... 0.0385, 0.7594, 0.4975]], [[ 0.3184, 0.5912, -0.3036, -0.5452, -0.4118]]], grad_fn=) rhs_data = tensor([[[-3.7486, -2.1635, 1.3521, 0.4901, -0.2648]], [[-0.5974, -0.5042, 0.8162, 1.5071, 0.7871]], ... 0.3711, -0.4932, -0.1110]], [[ 0.9065, 0.2612, 0.7785, 0.5052, 0.5206]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-1-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'el': Scheme(shap...), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-1.5248], [ 0.9784], [ 1.1883], [-0.9808]], [[ 1.0447], [ 1.542... [ 0.3052]], [[-0.0335], [ 1.7272], [ 1.7558], [ 0.4224]]], grad_fn=) rhs_data = tensor([[[ 0.4546], [ 1.1677], [-0.7659], [ 1.0554]], [[-0.9779], [ 1.750... [-0.2529]], [[-0.4500], [-1.2631], [-0.3577], [ 0.2579]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-1-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'el': Scheme(shap...), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-4.6381], [-1.6009], [-0.9538], [ 0.9968]], [[ 4.3718], [ 1.643... [ 2.7943]], [[-0.3910], [-0.5088], [ 1.4960], [-3.0555]]], grad_fn=) rhs_data = tensor([[[ 2.2707], [-2.5914], [ 2.1770], [-3.3759]], [[-0.6990], [ 2.325... [ 0.4315]], [[ 2.0483], [-0.4581], [ 2.5667], [-0.2146]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The remaining num_heads=4, out_dim=1 parametrizations (graphs g1 through g5, both index dtypes) fail at the same line with an identical traceback; short summary:

FAILED tests\pytorch\test_nn.py::test_gatv2_conv[4-1-g1-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_gatv2_conv[4-1-g1-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_gatv2_conv[4-1-g2-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_gatv2_conv[4-1-g2-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_gatv2_conv[4-1-g3-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_gatv2_conv[4-1-g3-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_gatv2_conv[4-1-g4-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_gatv2_conv[4-1-g4-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_gatv2_conv[4-1-g5-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_gatv2_conv[4-1-g5-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
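Every one of these failures dies at python\dgl\backend\pytorch\sparse.py:731 on `with autocast(enabled=False):`. The message indicates that on this Python 3.6 worker `autocast` is bound to a zero-argument fallback named `empty_context`. A self-contained sketch reproducing that failure mode, plus a keyword-tolerant variant; the fallback's shape here is an assumption modeled on the error message, not dgl's actual code:

    import contextlib

    @contextlib.contextmanager
    def empty_context():
        # Assumed no-op stand-in used when AMP autocast is unavailable.
        yield

    autocast = empty_context

    try:
        with autocast(enabled=False):  # same call shape as sparse.py:731
            pass
    except TypeError as err:
        # Reproduces: empty_context() got an unexpected keyword argument 'enabled'
        print(err)

    @contextlib.contextmanager
    def empty_context_compat(*args, **kwargs):
        # Accepting and ignoring keywords such as enabled=False keeps the
        # autocast call site working whether or not AMP is available.
        yield

    with empty_context_compat(enabled=False):
        pass  # no TypeError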
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-1-g6-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.0593], [ 0.2462], [ 0.0334], [-3.8041]], [[-0.7625], [ 1.966... [ 0.7571]], [[-0.2741], [-0.1052], [-1.5081], [-0.0471]]], grad_fn=) rhs_data = tensor([[[ 0.9796], [ 0.1691], [ 1.7341], [-0.9623]], [[ 1.3113], [-0.460... [-0.8449]], [[ 0.2877], [ 0.0187], [-1.1389], [-0.7043]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-1-g6-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(4, 1), dtype=torch.float32), 'er': Scheme(shape=(4, 1), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 1.0022], [-0.5406], [ 0.0723], [-1.0037]], [[ 1.0301], [-0.958... [-1.1433]], [[ 0.0571], [ 0.3238], [-0.8948], [-1.6934]]], grad_fn=) rhs_data = tensor([[[ 7.3543e-01], [-7.7134e-01], [ 3.8357e+00], [-1.8321e+00]], [[ 6.0420e-0... [[-1.4584e-01], [-1.8161e+00], [-5.3821e-01], [-4.2008e-01]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'el': Scheme(shap...), 'er': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 0.0613, -0.6849, -0.1974, 0.1149, -0.2255], [ 0.8051, -0.1104, -0.3701, 0.0440, 0.4101], ...4, -0.6162, -1.0289, 0.8139], [-0.9716, -0.1513, -0.6588, 0.7798, 0.6332]]], grad_fn=) rhs_data = tensor([[[ 6.2159e-01, -7.8696e-01, 9.8142e-01, -5.5864e-01, 1.4125e-02], [-1.8441e-02, 1.6137e+00, 4.140...8434e-01], [ 6.6451e-03, -2.4498e-01, 1.1806e+00, -8.6186e-01, -2.2997e-01]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'el': Scheme(shap...), 'er': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-6.7886e-01, 1.9101e-01, -1.3684e+00, -5.3866e-01, 4.0147e-01], [ 1.4634e+00, 1.3319e+00, 1.200...7418e+00], [ 1.2091e+00, 2.9386e-01, -8.6307e-01, -1.5170e+00, -8.0632e-01]]], grad_fn=) rhs_data = tensor([[[-1.6448, -0.3274, 0.2648, 1.7123, -0.7531], [ 2.1541, -1.4751, 1.9203, -1.6337, -2.5995], ...6, 0.7806, 0.8621, 0.5614], [ 2.6895, 0.2919, 1.0267, 0.0368, -0.5082]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g1-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(4, 5), dtype=torch.float32), 'er': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.5743, 0.2410, -0.2388, 0.1787, -0.1523], [ 0.3622, 0.5593, 0.3596, 0.6419, 0.4960], ...4, -0.3340, -0.5994, -0.4656], [-0.8027, 0.5380, -0.0445, 0.2114, 0.4394]]], grad_fn=) rhs_data = tensor([[[ 0.6225, 0.0318, -0.9630, 0.9272, -0.6198], [-0.4992, 0.4227, 0.1694, -0.0806, -0.0693], ...3, 0.3578, -0.6495, -0.0965], [-0.2865, -0.2395, -0.7947, 0.3286, -0.1745]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g1-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(4, 5), dtype=torch.float32), 'er': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.3582, 0.6004, -0.4412, -0.7930, 0.6868], [ 0.8057, -0.1199, -0.5953, -0.4729, -0.4331], ...5, -0.9140, -0.4654, -0.4907], [ 0.0962, -0.7285, -0.8933, -0.0422, 0.7941]]], grad_fn=) rhs_data = tensor([[[ 0.0247, 0.5473, 0.1720, -0.5313, -0.0664], [-0.3336, -0.9624, 0.2008, -0.9940, 0.6640], ...2, -1.2648, 0.9912, 0.2205], [-0.2759, 1.0096, -1.2643, -0.5249, 0.2208]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g2-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'el': Scheme(shap...r': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-1.5317e-01, 3.6508e-01, 4.8938e-02, -6.3920e-01, 4.2037e-01], [-6.2535e-01, 5.7798e-01, -2.148...4254e-02], [-1.5052e-01, -7.7757e-01, 2.1689e-03, -4.5364e-01, -1.0242e+00]]], grad_fn=) rhs_data = tensor([[[-0.4871, 0.2181, -0.8171, -0.0991, -0.9024], [ 0.2800, 0.0137, -0.3549, 0.6533, 0.3550], ...4, 0.2325, 0.1351, -0.0360], [ 0.2115, 1.2571, 0.5301, 0.2603, 0.1958]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g2-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'el': Scheme(shap...r': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.2758, 0.1867, 2.0641, -0.4528, 0.3045], [ 0.3652, 1.5035, -0.3147, -1.8376, -0.2442], ...5, -1.3744, -0.6990, 0.0457], [ 0.5195, 0.7881, 1.7572, 0.1334, -0.2182]]], grad_fn=) rhs_data = tensor([[[-8.0919e-02, 3.4624e-01, -1.2255e+00, 1.2169e+00, -8.6059e-01], [ 5.4277e-01, -4.2840e-01, -1.261...7956e-01], [ 8.3811e-01, -1.6032e+00, 3.1493e+00, -5.6948e-01, 2.9192e-01]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g3-idtype0] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'el': Scheme(shape=(4, 5), dtype=torch.float32), 'er': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-8.0491e-03, -2.3519e-01, 1.8379e-01, 2.4737e-01, -4.8001e-03], [ 2.9900e-01, 4.8019e-01, -5.735...3996e-01], [ 1.4392e-01, 1.3692e-01, -9.4073e-02, -1.0845e+00, 1.2711e+00]]], grad_fn=) rhs_data = tensor([[[ 0.1073, -0.0926, 0.2591, 0.1143, -0.1976], [ 0.1019, 0.2044, 0.4441, 0.2075, -0.2089], ...1, -0.7045, 0.2833, 1.4744], [ 0.2011, 0.6529, -0.8639, -1.3477, -1.1950]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g3-idtype1] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'el': Scheme(shape=(4, 5), dtype=torch.float32), 'er': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.2016, -0.5120, 0.0536, 0.6993, -1.5782], [ 0.3968, -0.7288, 0.5348, 0.8288, 1.7559], ...1, 0.6992, 1.3581, 1.3295], [ 1.0578, 0.3520, -0.9984, -1.1056, -0.5247]]], grad_fn=) rhs_data = tensor([[[-3.4696e-01, -2.1659e-01, 5.5323e-01, -1.8265e-02, 2.1405e-01], [ 2.9571e-01, -6.2857e-01, -8.337...9140e-02], [-1.9953e-01, -2.2892e-01, -6.2482e-01, -2.5903e-01, -1.5734e+00]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g4-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(4, 5), dtype=torch.float32), 'er': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.0816, 1.1359, 0.5987, -1.3428, 1.3491], [ 0.9460, 0.4619, 0.5998, 1.0034, 0.1466], ...1, -2.8400, -0.8883, -1.9857], [ 0.1402, -0.4429, 0.7958, 0.8786, -0.2275]]], grad_fn=) rhs_data = tensor([[[ 2.6757, -1.1637, 0.7641, -0.5051, -0.6453], [ 2.3930, -1.7506, 2.7822, -0.7707, 1.1098], ...3, -3.7213, 0.1611, 0.1470], [-0.3553, 0.9276, -0.8301, -0.7092, 1.3590]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g4-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(4, 5), dtype=torch.float32), 'er': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-1.6312e-01, 4.7990e-01, -5.3793e-01, -3.8500e-01, -2.7367e-01], [-4.8571e-01, 2.4375e-01, 5.235...1877e-01], [-1.0897e+00, 4.4055e-01, -4.0708e-01, -9.0817e-02, -4.5936e-01]]], grad_fn=) rhs_data = tensor([[[-0.7214, -0.3112, -0.0996, 0.6509, 0.0337], [-0.5666, -0.4122, -0.3727, -0.3229, 0.3087], ...4, -0.3223, -0.3083, 0.9503], [ 0.2059, 0.0338, 0.1221, -0.3473, 0.5555]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g5-idtype0] _______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.2378, -0.4710, -0.7136, 0.9966, -1.6844], [-0.9419, 0.6565, -0.2314, -0.1451, 1.5155], ...8, 0.1321, -0.5014, 0.4384], [ 0.2207, -0.3146, 0.2370, -0.9194, 0.5117]]], grad_fn=) rhs_data = tensor([[[-0.2378, -0.4710, -0.7136, 0.9966, -1.6844], [-0.9419, 0.6565, -0.2314, -0.1451, 1.5155], ..., -1.1368, -0.3990, 0.2568], [-0.7300, 0.0355, -1.9954, -0.4114, 0.2230]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g5-idtype1] _______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.0448, -0.5802, 0.5853, 1.5337, -0.0938], [-0.5366, -0.3445, 0.8199, 0.1596, -0.0936], ...7, 0.7534, 1.3606, -1.3052], [-2.3090, -0.4686, 0.6032, -1.3555, -3.0084]]], grad_fn=) rhs_data = tensor([[[-0.0448, -0.5802, 0.5853, 1.5337, -0.0938], [-0.5366, -0.3445, 0.8199, 0.1596, -0.0936], ..., 1.4217, -0.2796, -0.8359], [-0.9235, -0.2591, 0.3156, -0.4141, -1.4557]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g6-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(4, 5), dtype=torch.float32), 'er': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[-0.8917, 0.9453, 0.1839, -2.4546, 0.3208], [-1.3204, 1.9003, 0.3560, 0.4615, 1.2873], ...5, 1.2446, -1.1236, 0.3823], [ 0.3088, -1.6394, -1.4025, 0.4820, 0.1566]]], grad_fn=) rhs_data = tensor([[[ 0.7318, -0.4621, 0.4455, 1.0447, -1.5532], [ 2.2933, 0.8608, 1.0303, 0.8239, 0.3713], ...7, -0.4553, 2.0909, -0.8320], [ 0.6100, 0.9100, 1.4306, -0.2873, 0.6267]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_gatv2_conv[4-5-g6-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'el': Scheme(shape=(4, 5), dtype=torch.float32), 'er': Scheme(shape=(4, 5), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_gatv2_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gat = nn.GATv2Conv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) gat = gat.to(ctx) > h = gat(g, feat) tests\pytorch\test_nn.py:519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward graph.apply_edges(fn.u_add_v('el', 'er', 'e')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[ 2.7309, 0.1083, 0.9902, -0.2375, 1.5097], [ 0.5685, -1.3686, -1.8885, -0.0718, 0.5320], ...6, 0.3441, -0.3591, -0.2271], [ 0.3779, -0.6857, -0.8396, -0.0035, -0.2674]]], grad_fn=) rhs_data = tensor([[[ 2.4341e+00, -7.9033e-01, 3.4665e+00, -1.1424e+00, -1.7548e-01], [ 1.6833e+00, 5.3114e-01, 2.301...5984e+00], [ 6.3434e-01, 1.2100e-01, 3.0803e-01, 6.8915e-01, -1.0221e+00]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
_____________________ test_gatv2_conv_bi[1-1-g0-idtype0] ______________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int32, out_dim = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_gatv2_conv_bi(g, idtype, out_dim, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gat = nn.GATv2Conv(5, out_dim, num_heads)
        feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
        gat = gat.to(ctx)
>       h = gat(g, feat)

tests\pytorch\test_nn.py:543:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gatv2conv.py:288: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[0.0133]], [[2.2480]]], grad_fn=)
rhs_data = tensor([[[-1.7623]], [[ 0.7648]], [[-2.9369]], [[ 0.5808]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[1-1-g0-idtype1] ______________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int64, out_dim = 1, num_heads = 1
lhs_data = tensor([[[-0.9302]], [[ 2.4272]]], grad_fn=)
rhs_data = tensor([[[-0.8574]], [[ 1.3571]], [[ 1.0753]], [[-2.6492]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[1-1-g1-idtype0] ______________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int32, out_dim = 1, num_heads = 1
lhs_data = tensor([[[ 1.2686]], [[ 0.4787]], [[ 0.7578]], [[ 0.5825]], [[ 0.8260]], [[-2.4887]]], grad_fn=)
rhs_data = tensor([[[-2.7969]], [[-0.7482]], [[-0.9773]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[1-1-g1-idtype1] ______________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int64, out_dim = 1, num_heads = 1
lhs_data = tensor([[[-1.5194]], [[ 0.1584]], [[ 0.3115]], [[ 1.6498]], [[-2.8148]], [[ 0.1842]]], grad_fn=)
rhs_data = tensor([[[ 2.7564]], [[ 3.7364]], [[-1.3609]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[1-2-g0-idtype0] ______________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int32, out_dim = 2, num_heads = 1
lhs_data = tensor([[[-0.6889, -0.2673]], [[-0.7297, 1.5759]]], grad_fn=)
rhs_data = tensor([[[-0.0778, 1.1332]], [[ 1.6522, -0.1728]], [[ 0.6892, -1.2536]], [[-0.8748, 0.0889]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[1-2-g0-idtype1] ______________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int64, out_dim = 2, num_heads = 1
lhs_data = tensor([[[-1.8266, 3.5786]], [[ 0.6260, -2.3363]]], grad_fn=)
rhs_data = tensor([[[-1.0211, -1.3839]], [[ 0.5972, -1.4325]], [[-2.2996, -1.1856]], [[-1.9642, 0.6281]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[1-2-g1-idtype0] ______________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int32, out_dim = 2, num_heads = 1
lhs_data = tensor([[[ 2.7550e-01, 3.7824e+00]], [[ 1.4122e+00, -2.6246e+00]], [[-1.6688e-03, -1.6573e-01]], ..., 1.0081e+00]], [[-1.0737e+00, 9.0269e-01]], [[ 3.2830e-01, 1.0958e+00]]], grad_fn=)
rhs_data = tensor([[[-1.2388, -0.9786]], [[ 1.7366, 1.6057]], [[ 0.2182, 0.3972]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[1-2-g1-idtype1] ______________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int64, out_dim = 2, num_heads = 1
lhs_data = tensor([[[-0.0693, 1.9331]], [[-2.2001, -3.9850]], [[ 0.7933, 1.0276]], [[ 2.6458, 0.9442]], [[ 5.0171, 6.0013]], [[-2.1558, -1.8875]]], grad_fn=)
rhs_data = tensor([[[ 1.4751, 0.9986]], [[-0.1330, 0.3691]], [[ 1.6986, 2.0056]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
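The gsddmm source captured in every traceback also shows how the backend canonicalizes operators before dispatch: 'sub' is rewritten to 'add' with a negated right-hand operand, and 'div' to 'mul' with a reciprocal, so kernels are only needed for the two commutative ops. A standalone sketch of that rewrite, with plain tensors standing in for graph-held features (illustrative, mirroring the snippet from python\dgl\backend\pytorch\sparse.py rather than copying it):

    import torch

    def canonicalize(op, rhs_data):
        # Reduce 'sub'/'div' to the commutative ops the kernels implement.
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        return op, rhs_data

    op, rhs = canonicalize('div', torch.tensor([2.0, 4.0]))
    assert op == 'mul' and torch.allclose(rhs, torch.tensor([0.5, 0.25]))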
_____________________ test_gatv2_conv_bi[4-1-g0-idtype0] ______________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int32, out_dim = 1, num_heads = 4
lhs_data = tensor([[[ 0.2608], [-0.4148], [ 0.9681], [-1.8057]], [[-0.7771], [ 3.3052], [ 0.6150], [-1.4448]]], grad_fn=)
rhs_data = tensor([[[-0.7026], [-0.7191], [ 2.0442], [-1.2088]], [[ 0.6318], [-1.269... [-2.1726]], [[-1.0498], [ 0.2173], [ 1.4098], [ 2.7376]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[4-1-g0-idtype1] ______________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int64, out_dim = 1, num_heads = 4
lhs_data = tensor([[[-0.0259], [-1.6666], [ 2.8537], [-0.8805]], [[-1.0537], [-0.8144], [ 1.3610], [-0.2527]]], grad_fn=)
rhs_data = tensor([[[ 1.1424], [ 0.7351], [ 1.0310], [-0.0480]], [[-2.4787], [-0.478... [ 0.1927]], [[-2.0536], [-0.6037], [ 0.7965], [ 0.1536]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[4-1-g1-idtype0] ______________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int32, out_dim = 1, num_heads = 4
lhs_data = tensor([[[-0.5725], [ 2.8325], [-1.4587], [ 0.4731]], [[-0.3568], [-0.701... [ 1.5426]], [[ 0.5490], [-0.4877], [ 0.0740], [-1.8241]]], grad_fn=)
rhs_data = tensor([[[-0.6791], [ 2.0268], [ 1.3207], [ 1.1879]], [[ 0.5816], [ 0.796... [-0.5743]], [[-0.6853], [-1.1905], [ 2.2661], [-2.3645]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[4-1-g1-idtype1] ______________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int64, out_dim = 1, num_heads = 4
lhs_data = tensor([[[ 3.2302], [ 1.0438], [-2.8922], [-1.0383]], [[-0.9124], [-0.309... [ 0.3991]], [[ 1.6514], [-1.1843], [ 1.1989], [-0.3002]]], grad_fn=)
rhs_data = tensor([[[ 0.5667], [-0.4020], [-0.6449], [-0.1821]], [[-2.7037], [ 1.015... [ 0.9937]], [[-1.1280], [ 0.4077], [ 0.6489], [-0.4894]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[4-2-g0-idtype0] ______________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int32, out_dim = 2, num_heads = 4
lhs_data = tensor([[[-0.2637, -0.8810], [-0.2206, -0.1826], [ 0.0475, 0.5577], [ 1.3546, -2.1613]], ...1825], [ 1.8888, -0.3567], [-0.4560, 1.8093], [ 0.7429, 0.6310]]], grad_fn=)
rhs_data = tensor([[[ 0.0463, 0.6432], [-0.6608, -0.3124], [ 1.3438, 0.3285], [ 1.5787, 0.6057]], ...5552], [ 0.8361, -0.0426], [-1.7327, 0.7641], [-1.4477, 0.9004]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[4-2-g0-idtype1] ______________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int64, out_dim = 2, num_heads = 4
lhs_data = tensor([[[-1.0723, 1.3507], [-0.2587, 0.1481], [ 0.1638, -0.0823], [ 1.0906, -0.1256]], ...2272], [-2.7643, -0.7742], [ 1.6863, 1.1412], [ 0.8595, -1.9046]]], grad_fn=)
rhs_data = tensor([[[-2.0025, 0.7664], [-0.7697, -0.1440], [-1.0659, 1.0813], [ 1.2918, 1.1124]], ...1185], [ 0.4473, 0.5248], [ 0.2229, -1.7197], [ 1.5281, 2.9379]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[4-2-g1-idtype0] ______________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int32, out_dim = 2, num_heads = 4
lhs_data = tensor([[[ 3.1099, -0.4278], [-0.0066, -1.1153], [-2.0509, -0.3534], [ 0.5579, -1.2069]], ...5524], [-0.0546, 1.4579], [ 0.4669, 0.1831], [ 0.6552, -1.0917]]], grad_fn=)
rhs_data = tensor([[[-2.0707, -0.0367], [-0.6861, -1.9129], [-3.4629, 1.5843], [ 1.0048, -0.5094]], ...0521], [ 0.0683, 0.3065], [ 0.2424, -0.8365], [ 0.2386, -0.0861]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
_____________________ test_gatv2_conv_bi[4-2-g1-idtype1] ______________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int64, out_dim = 2, num_heads = 4
lhs_data = tensor([[[ 0.8851, -1.0254], [-0.1103, -1.0770], [ 0.5852, -2.0093], [-1.2819, -0.4825]], ...4015], [ 0.7568, -0.6128], [ 1.2711, 0.9593], [ 1.4093, -0.6879]]], grad_fn=)
rhs_data = tensor([[[ 3.2303, 0.2029], [-0.6922, 4.7644], [ 2.1568, -0.4187], [ 0.6550, 3.5110]], ...0094], [ 1.2285, 4.7648], [-0.4397, -1.8284], [ 0.3382, 3.4759]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
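Both failing modules reach the broken code path identically: gatv2conv.py:288 calls graph.apply_edges(fn.u_add_v('el', 'er', 'e')), and the EGATConv failures below go through the same apply_edges -> invoke_gsddmm -> gsddmm chain via fn.u_add_v('f_ni', 'f_nj', 'f_tmp'). A minimal reproduction sketch for an affected build (the toy graph and feature shapes here are made up for illustration):

    import dgl
    import dgl.function as fn
    import torch

    g = dgl.graph(([0, 1], [1, 2]))   # 3 nodes, 2 edges
    g.ndata['el'] = torch.randn(3, 1)
    g.ndata['er'] = torch.randn(3, 1)
    # For each edge (u, v), compute el[u] + er[v] into g.edata['e']; on an
    # affected PyTorch build this raises the TypeError shown in this log.
    g.apply_edges(fn.u_add_v('el', 'er', 'e'))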
______________________ test_egat_conv[1-1-1-g0-idtype0] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_node_feats', [1, 5])
    @pytest.mark.parametrize('out_edge_feats', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5,
                           out_node_feats=out_node_feats,
                           out_edge_feats=out_edge_feats,
                           num_heads=num_heads)
        nfeat = F.randn((g.number_of_nodes(), 10))
        efeat = F.randn((g.number_of_edges(), 5))
        egat = egat.to(ctx)
>       h, f = egat(g, nfeat, efeat)

tests\pytorch\test_nn.py:564:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\egatconv.py:210: in forward
    graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[-1.9837], [-1.1596], [-1.2099], [ 2.1841], [ 1.0622], [-2.0678], [ 1.0281], [-0.2626], [-2.3762], [-1.4083]], grad_fn=)
rhs_data = tensor([[ 1.2065], [-1.3186], [ 0.5730], [ 0.7216], [-2.4841], [ 1.2998], [ 0.4063], [-0.8652], [-1.0416], [-0.8486]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
______________________ test_egat_conv[1-1-1-g0-idtype1] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 1
lhs_data = tensor([[-1.7643], [ 1.1458], [-3.6263], [-0.6129], [-0.8150], [-0.6306], [-2.8359], [-3.0163], [ 1.6360], [-4.0968]], grad_fn=)
rhs_data = tensor([[-0.2899], [-1.6179], [ 1.5335], [-0.7805], [ 0.6817], [-0.5212], [ 0.6766], [ 1.2876], [ 0.3656], [-0.0324]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
______________________ test_egat_conv[1-1-1-g1-idtype0] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 1
lhs_data = tensor([[ 3.4316], [ 1.7854], [ 0.4916], [-1.4800], [-3.0656], [ 0.9585], [-0.5771], [ 1.3040], [ 1.7081], [-0.6167]], grad_fn=)
rhs_data = tensor([[ 0.4872], [-4.4221], [ 0.2213], [-0.1164], [ 2.1821], [-0.5149], [ 2.4895], [-2.8139], [-1.5421], [ 2.6373]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
______________________ test_egat_conv[1-1-1-g1-idtype1] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 1
lhs_data = tensor([[-0.3001], [ 0.9268], [-1.9912], [-0.3428], [-1.5073], [-0.6777], [ 0.5952], [ 1.7083], [ 1.0207], [-0.6972]], grad_fn=)
rhs_data = tensor([[ 1.5802], [ 1.4154], [-3.8826], [-2.8975], [-1.0542], [-0.6293], [-1.8504], [-2.3198], [ 2.1504], [-0.8563]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
______________________ test_egat_conv[1-1-1-g2-idtype0] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 1
lhs_data = tensor([[ 1.3689], [ 1.6992], [-0.3913], [-0.6812], [ 1.5617], [-0.1367], [-1.4977], [-0.3283], [-1.8440], [-1.2188]], grad_fn=)
rhs_data = tensor([[-0.4436], [ 0.3662], [ 0.8571], [ 5.8676], [-2.7601], [ 1.8752], [-1.3885], [-1.5361], [-2.3169], [ 0.7906]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
______________________ test_egat_conv[1-1-1-g2-idtype1] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 1
lhs_data = tensor([[-0.0210], [-1.3353], [-2.5912], [-3.9179], [ 1.4500], [ 1.4419], [ 0.1789], [ 0.1028], [ 3.1465], [ 3.4479]], grad_fn=)
rhs_data = tensor([[ 1.5270], [-3.1317], [-0.3421], [-1.7025], [-1.8789], [ 3.1518], [-0.5934], [ 1.8909], [ 0.5270], [ 2.5669]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
______________________ test_egat_conv[1-1-1-g3-idtype0] _______________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 1
lhs_data = tensor([[-1.8084], [ 1.4252], [-2.5860], [ 0.9591], [ 0.7286], [ 0.2962], [-1.4975], [ 1.4559], [ 0.6721]], grad_fn=)
rhs_data = tensor([[ 0.1702], [ 2.0008], [-0.0319], [-1.7652], [ 0.2168], [ 2.8688], [-3.2648], [-2.5955], [ 0.5352]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
______________________ test_egat_conv[1-1-1-g3-idtype1] _______________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 1
lhs_data = tensor([[ 1.6223], [ 0.2371], [-0.4917], [ 0.7207], [ 2.4616], [ 3.6629], [-1.3165], [-1.7199], [ 1.6290]], grad_fn=)
rhs_data = tensor([[ 0.2893], [ 1.1330], [ 0.0593], [ 2.1957], [-2.2938], [-1.3900], [ 0.7139], [ 3.9108], [ 1.2891]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
______________________ test_egat_conv[1-1-1-g4-idtype0] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 1
lhs_data = tensor([[-2.2489], [ 0.6977], [-0.6810], [-0.3992], [-0.9549], [ 0.0957], [ 0.3528], [ 0.3547], [ 2.3431], [-1.2437]], grad_fn=)
rhs_data = tensor([[ 0.6134], [ 0.6300], [ 0.4434], [-1.4351], [-2.4716], [ 0.5775], [-0.1511], [-0.5965], [-0.7978], [ 0.7124]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
______________________ test_egat_conv[1-1-1-g4-idtype1] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 1
lhs_data = tensor([[ 0.3889], [ 1.7746], [-2.5886], [ 0.3332], [ 1.4764], [ 0.5246], [-0.4633], [ 3.0800], [-3.0837], [ 0.5896]], grad_fn=)
rhs_data = tensor([[ 0.5251], [-1.6353], [ 3.8769], [ 0.3921], [ 0.5063], [ 0.3722], [-0.1645], [-3.1691], [-0.3126], [ 0.6427]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
______________________ test_egat_conv[1-1-1-g5-idtype0] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 1
lhs_data = tensor([[ 2.5590], [ 3.4209], [ 2.7960], [-1.5737], [ 1.3153], [ 1.7566], [-0.2015], [-1.0274], [ 0.8919], [-1.8365]], grad_fn=)
rhs_data = tensor([[-1.3269], [-1.5947], [-1.4248], [ 2.1636], [-0.7930], [ 0.0675], [-1.2462], [ 1.1313], [-1.5790], [ 2.4162]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-1-g5-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.4828], [ 0.0515], [ 0.5216], [ 2.0078], [ 1.7605], [-0.2434], [-2.0837], [ 2.1624], [ 0.6064], [ 0.2911]], grad_fn=) rhs_data = tensor([[-2.3120], [-2.7919], [-3.0364], [-0.3088], [-1.4967], [-0.4539], [ 0.3483], [-0.9393], [-2.0719], [ 4.1374]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 2.2232], [ 0.8939], [ 0.2908], [-0.6565], [ 0.0358], [-1.6915], [ 1.0600], [-1.1864], [-1.2970], [-2.6229]], grad_fn=) rhs_data = tensor([[ 2.7841], [ 0.0039], [ 1.1798], [ 0.3332], [-0.6660], [ 0.0334], [ 1.5234], [-2.8464], [-1.5105], [-2.3193]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.3848], [ 3.6361], [ 0.5994], [ 0.0802], [ 2.1109], [ 0.6610], [-1.3027], [ 5.7181], [ 0.2420], [-0.0644]], grad_fn=) rhs_data = tensor([[ 2.9723e+00], [ 2.0588e+00], [-1.5392e+00], [-5.2600e-01], [-1.6400e-03], ...01], [-3.1768e+00], [ 4.5365e+00], [ 7.5092e-01], [-2.2005e+00]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g1-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.8083], [-0.6315], [ 1.5591], [-1.5418], [-1.3874], [ 0.2553], [-0.5505], [ 0.2332], [-1.0462], [ 3.4172]], grad_fn=) rhs_data = tensor([[ 0.1259], [ 0.4014], [ 0.8101], [ 2.7692], [ 1.9802], [ 0.4524], [ 0.3151], [-2.6044], [ 0.1397], [ 0.2772]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g1-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[2.1847], [0.8934], [1.5143], [1.9313], [2.5140], [0.1342], [2.2550], [0.9844], [2.3754], [1.0901]], grad_fn=) rhs_data = tensor([[-0.7138], [-0.1913], [ 0.9808], [ 1.9521], [ 1.5255], [-0.1336], [ 2.1051], [-0.0150], [ 0.2672], [ 2.2029]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g2-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 3.6684], [-3.8246], [-2.2607], [-1.6092], [-2.1457], [-2.4016], [ 0.8714], [-2.6216], [ 1.2264], [-1.1432]], grad_fn=) rhs_data = tensor([[-1.2083], [ 1.3820], [-1.2841], [-1.9813], [-1.9693], [-0.5169], [-2.3614], [ 0.1031], [-1.7166], [-0.6641]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g2-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 2.1769], [-1.2392], [-0.7810], [-0.1015], [ 0.9861], [ 0.7646], [ 1.4994], [-0.4446], [ 2.1092], [ 1.1567]], grad_fn=) rhs_data = tensor([[ 0.6555], [-1.0613], [-0.9363], [ 0.4830], [ 2.6376], [ 2.1240], [ 3.3062], [ 1.1973], [ 5.2595], [ 2.3797]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g3-idtype0] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.5390], [ 1.5751], [ 0.8392], [ 0.7174], [ 2.2216], [ 2.0319], [-3.7562], [ 0.2812], [ 2.7828]], grad_fn=) rhs_data = tensor([[-2.1967], [ 3.1941], [ 0.3549], [ 0.9047], [ 0.2886], [ 0.3208], [ 2.3009], [ 0.7998], [ 3.7961]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g3-idtype1] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.5730], [ 1.3183], [-3.3253], [-0.7844], [-2.7867], [ 0.9051], [-2.0811], [ 0.0180], [ 0.9482]], grad_fn=) rhs_data = tensor([[ 0.6680], [ 2.1042], [-0.9548], [ 0.1887], [-1.5646], [-2.5504], [-0.4785], [ 0.2376], [-0.1559]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g4-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.1369], [ 0.9024], [-0.7537], [ 0.5417], [ 1.0035], [-1.0498], [ 1.4059], [-0.8821], [ 1.1979], [ 1.7486]], grad_fn=) rhs_data = tensor([[-0.2269], [ 2.6755], [ 0.3550], [ 2.6903], [ 4.0163], [-0.8691], [-1.3069], [-1.3130], [-0.1179], [ 1.2068]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g4-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-2.5050], [-1.3629], [-1.3069], [-2.7562], [ 1.2322], [ 1.8122], [-0.8600], [ 1.0151], [-0.6798], [-1.5950]], grad_fn=) rhs_data = tensor([[-1.2059], [-1.9523], [ 2.2092], [ 2.3935], [ 2.2850], [ 0.1742], [-5.0623], [-2.0806], [-1.2818], [ 3.9557]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g5-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.0983], [-0.6363], [ 0.8019], [-1.7780], [-0.9122], [ 3.2995], [ 4.0411], [ 1.0527], [-3.4736], [ 0.0752]], grad_fn=) rhs_data = tensor([[-0.3303], [ 0.1051], [ 1.4269], [-2.4812], [-0.9091], [ 1.1765], [ 1.5323], [-1.7500], [ 1.0788], [-2.0021]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-1-5-g5-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(1,), dtype=torch.float32), 'f_nj': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.4877], [ 2.3753], [-3.1280], [ 1.7633], [ 0.5442], [ 1.4534], [-1.0089], [ 1.0435], [ 0.2633], [ 0.9303]], grad_fn=) rhs_data = tensor([[ 0.7791], [ 1.2760], [-0.2289], [ 2.0642], [-2.2068], [ 4.2850], [ 0.1671], [ 0.8219], [ 4.3436], [ 1.5679]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 2.2292, 0.7761, 0.8729, 0.0677, -0.0351], [ 1.7661, 1.0958, 0.8817, 0.0134, -0.8056], ...962, -0.4775, 0.5063, -1.8566, -1.2691], [-0.1253, -1.9480, -1.6616, -0.8573, 0.5076]], grad_fn=) rhs_data = tensor([[ 0.2724, -0.3118, -1.1077, 1.1291, -0.2934], [-0.7067, 1.0285, -0.3682, -1.4555, -0.3965], ...785, -1.3536, 1.1341, 2.1100, 0.1725], [ 1.6862, -2.6581, 2.0313, -0.7307, 0.5329]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.5567e+00, 3.7043e-01, -1.1112e+00, 5.4604e-01, 9.7542e-01], [ 2.0248e-01, 4.5994e-01, -1.7311e... 1.1999e+00], [ 4.6394e-01, 1.0419e+00, -1.4428e+00, -2.8518e-01, -1.8103e+00]], grad_fn=) rhs_data = tensor([[ 3.0364, -1.0318, 0.0287, 1.1455, -0.2633], [-1.0490, -3.4290, -0.2606, -3.2877, 0.9691], ...434, 1.6149, -0.1309, -0.0900, 2.1924], [ 1.2451, -2.0974, -1.1284, -0.4601, 2.0588]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g1-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.1543, -0.3406, 1.3935, -3.1570, 0.6227], [ 2.4716, 1.3599, 0.7560, -0.0414, 1.7562], ...964, 0.1458, -3.7151, -0.2945, 0.7375], [-1.0108, -0.3073, 1.6134, -0.3309, -0.3012]], grad_fn=) rhs_data = tensor([[-0.2096, -1.0450, 1.7865, 0.5910, 1.8447], [-1.0185, 0.2223, 2.3722, -2.3820, -2.1388], ...615, 1.6159, -0.1511, -1.5254, -1.9828], [-0.4477, -0.1013, -1.2512, -0.2377, 0.3427]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g1-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 2.1198, 0.2209, 1.4118, 0.9327, -0.1623], [ 0.7169, -1.4005, 3.0707, 0.6256, -0.4798], ...518, 0.9069, 0.2913, 2.2662, 0.1914], [-3.2028, -1.0832, -0.2766, 0.1172, 1.9696]], grad_fn=) rhs_data = tensor([[ 2.5442e-01, 4.7672e-02, -1.9061e+00, -2.3700e+00, -2.1104e-01], [-2.2052e-03, 1.0820e+00, 5.3269e... -1.1084e+00], [-2.4736e+00, 7.9400e-01, 1.6422e+00, 7.8995e-01, -1.2234e-01]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g2-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.8698, 0.4731, -0.6279, -0.7516, -0.2538], [-0.4324, -1.1567, -1.4843, -1.0208, 0.0378], ...554, 1.5762, 2.2341, -3.4180, 0.5337], [-2.8927, -2.4243, 0.3537, 0.9134, -1.8519]], grad_fn=) rhs_data = tensor([[ 2.9725, 0.2351, 2.9036, -4.4582, 1.2636], [-3.4791, 2.4749, -3.3202, 2.0570, -0.5733], ...851, 1.4939, 0.7504, -1.6213, -0.3278], [ 1.4869, -3.5392, 2.5625, 0.5170, -3.3768]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g2-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.0135, 0.1967, 1.3619, -0.7399, 2.4161], [-2.3796, 1.6751, -2.0963, 0.4456, -1.9764], ...969, -1.0809, -1.1263, -0.8281, -1.8139], [-0.4464, -0.1177, 0.6837, 2.9737, -0.8409]], grad_fn=) rhs_data = tensor([[-1.7881, -2.5947, 1.8418, -2.2188, 1.7566], [-0.5611, 0.1087, -1.3058, 3.4624, -1.5013], ...867, -1.1388, -3.6475, 2.4635, -2.4500], [ 0.1178, -3.8976, -0.5162, 3.8410, -3.1021]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g3-idtype0] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.6274, -1.9813, 0.3693, -2.2988, -2.2915], [-1.9376, -0.8767, -2.7906, -0.6635, -1.5620], ...615, 2.5475, 0.0974, 0.0206, 1.8997], [-0.2841, 2.2899, -0.3244, 0.7462, 1.1603]], grad_fn=) rhs_data = tensor([[ 2.2075, 0.2019, 0.2367, 3.6682, -3.2571], [-2.9066, 0.3186, -2.2495, -0.1979, 2.9868], ...704, 0.4992, -0.2510, 0.0846, -0.1753], [-0.8731, 0.4930, -0.8623, 0.1239, 3.2514]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g3-idtype1] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.2502, -0.1276, 0.5703, -0.1527, 0.9007], [ 2.6106, -0.8256, -0.9256, -1.0412, 0.7527], ...357, 0.5633, 0.2226, -0.0466, -1.1762], [ 2.7168, -3.0784, -0.0506, -0.4492, -0.7324]], grad_fn=) rhs_data = tensor([[-2.1059, 2.4533, -1.0715, -0.6698, 0.3795], [-1.7206, 4.0144, -2.1319, 0.0607, -0.2874], ...299, -0.2342, -0.4059, -1.9755, -1.2225], [-0.2999, 4.6867, -1.3491, -0.0919, -1.8703]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g4-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.2136, -0.9638, 1.4458, -0.9056, 0.7961], [ 2.6831, -1.7321, 1.9454, -0.5555, 0.3980], ...582, -0.0589, 0.3639, 0.1005, -1.9299], [-3.4502, -1.2031, -0.8054, -1.2209, 1.4334]], grad_fn=) rhs_data = tensor([[-1.5482, -1.3303, -1.1250, -0.8845, -0.0371], [-0.2660, -0.0963, -1.1495, 0.3533, 0.6082], ...484, 2.2112, 0.8943, 1.2655, -0.3795], [-0.8039, 1.7557, -0.2629, 1.2672, -1.4617]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g4-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.9538, 1.9636, -1.0473, 1.5812, -1.1933], [ 3.1597, 1.6426, -1.6081, -4.2960, -0.1847], ...205, -0.2807, -0.7947, -0.1322, -0.2584], [ 1.3606, 0.4565, -2.1084, -2.9611, -0.0272]], grad_fn=) rhs_data = tensor([[-4.1490e-01, 2.7696e-04, 1.4479e+00, -5.5889e-01, 6.2391e-01], [ 5.8932e-01, 2.2507e-01, -1.0255e... 5.9090e-01], [-5.3113e-01, -1.4547e+00, -7.5951e-01, 2.7943e+00, 9.5287e-01]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g5-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.5821, -1.9691, -1.5370, -1.9131, 1.7537], [ 0.5591, 1.3083, 0.6558, 1.3594, -0.4754], ...859, 2.0641, -2.5095, -0.0132, 0.5487], [ 0.9767, 2.5717, -0.3369, -3.3207, -0.3065]], grad_fn=) rhs_data = tensor([[ 0.3420, 0.2810, -1.0181, 0.1686, 1.6525], [-0.8873, -1.8001, -0.1218, 0.7587, 1.9681], ...586, 0.8394, 2.0915, 0.4825, -0.5250], [-0.8588, 0.5579, 0.7878, -0.7125, 4.4885]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-1-g5-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.3711e+00, 5.1826e-02, 1.5582e+00, -2.8094e+00, -9.9020e-01], [ 6.0076e-01, 1.8137e+00, 6.1193e... 1.3008e-01], [-6.9144e-01, 3.3420e+00, -5.8137e-01, -1.5156e+00, -8.9822e-01]], grad_fn=) rhs_data = tensor([[ 0.5989, -1.7439, 0.4382, -0.0057, 2.6945], [ 2.5224, -0.1026, -1.0784, 1.8687, -0.5587], ...985, 1.7506, -0.6615, 0.5553, -0.8469], [ 0.8139, -0.5157, -3.8765, 1.2383, -0.1366]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-5-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.5224, 4.0198, -0.9121, 0.8066, 0.2421], [ 0.2687, 0.0524, 0.4975, 0.3253, -0.0616], ...524, 0.5998, -2.1082, -0.2585, -0.6619], [ 1.9605, -1.9432, 0.8023, -0.7697, -0.0879]], grad_fn=) rhs_data = tensor([[ 0.1521, -2.7563, 2.1220, 1.0328, 0.5755], [-0.3067, -0.8002, 0.9515, 1.0783, -0.0851], ...759, -0.8033, 2.0626, 0.6030, -1.4578], [-0.1893, 0.3429, -2.9925, -0.9350, 4.1221]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-5-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.3066, -0.5182, 0.1193, -0.0776, 1.4858], [ 0.7725, -2.2485, 1.4806, 0.7869, 0.3996], ...206, -1.1703, 0.7422, -0.1161, 1.4291], [ 1.7498, -0.0964, 3.3670, 1.3568, -0.0539]], grad_fn=) rhs_data = tensor([[ 2.0710e+00, -1.3243e+00, -5.3452e-01, 1.6118e+00, -5.7696e-01], [-4.0600e-01, 1.1775e+00, 1.8890e... -3.8951e-01], [-1.2197e-02, 3.8464e+00, 1.7388e+00, 3.3590e-01, 9.4638e-01]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-5-g1-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-2.2770e-03, 1.5243e+00, 1.9323e-01, 2.1749e-01, 4.1734e-01], [ 2.9220e-01, 3.6485e-01, 2.5542e... 4.1600e-01], [-1.8994e+00, 2.4025e+00, -2.3199e+00, 7.5394e-01, 1.8846e-01]], grad_fn=) rhs_data = tensor([[ 1.2815e+00, -2.4189e+00, -1.2301e-01, -6.5389e-01, 1.0223e+00], [-1.8534e+00, 4.3350e+00, 6.7586e... -3.8844e-01], [ 8.4119e-02, 2.6608e+00, -7.7496e-01, -2.1621e+00, -4.7734e-01]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-5-g1-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-2.6950, 2.2951, -0.1309, -0.0055, -3.1580], [-4.5825, 1.6883, -2.1557, -2.5061, 0.4735], ...427, -1.3015, -0.9402, 3.7236, 1.7030], [ 0.7454, -2.9531, 0.4406, 2.2932, 0.2160]], grad_fn=) rhs_data = tensor([[-1.1289, 0.4693, 0.0560, -0.1606, -2.6036], [-0.0786, 1.6601, -0.6393, 0.4642, 0.0210], ...776, 1.0611, 0.4576, -0.1061, 1.4631], [ 0.5337, 1.9598, 0.9898, 1.9486, -0.5580]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-5-g2-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.7537, 2.0143, 0.3658, 0.7582, -1.8089], [-0.4673, 1.1702, -3.6322, 1.0810, 1.5954], ...586, -0.1455, -1.6464, -0.2126, -1.6582], [ 1.4705, 1.4972, 0.2438, 0.7064, -2.4887]], grad_fn=) rhs_data = tensor([[-0.0246, 0.3147, 1.2331, -1.4804, 1.1563], [ 0.2305, 1.2871, 3.1029, -3.2249, 0.7716], ...642, 1.3884, -0.4639, 0.5447, 0.7121], [ 1.3656, -1.5995, -0.4328, 0.7571, -3.4159]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-5-g2-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.8379, 0.7170, -0.8860, -0.9213, -1.5779], [ 0.5741, 1.7654, 1.3555, -1.0251, 0.3889], ...383, 0.2383, -0.8433, 0.2941, -0.2822], [-3.5883, -2.0470, -0.7100, 0.0262, 0.3010]], grad_fn=) rhs_data = tensor([[ 0.2643, -0.6982, 3.4455, -1.7912, 1.5166], [-2.1576, -1.8441, 0.1659, -1.9829, -0.1315], ...737, 0.1600, 0.3329, -0.6188, -0.8410], [ 1.4112, -0.1187, -2.8850, 0.2129, -0.9516]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' ______________________ test_egat_conv[1-5-5-g3-idtype0] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.5511, 0.3179, 1.7453, -2.7484, 1.2316], [-2.4131, 0.8740, -1.9306, 0.6628, -1.4297], ...656, 1.8283, 0.5266, -4.1062, -0.6205], [ 1.7387, 2.5635, 1.0679, 0.2465, -0.2020]], grad_fn=) rhs_data = tensor([[-0.3100, -0.2909, -0.2811, 0.2584, 2.4082], [ 0.1045, -1.5974, 1.4981, 0.8283, 1.8680], ...759, -2.3608, 1.4576, 1.1587, 1.3148], [-0.0707, -0.7638, -0.2511, -1.1084, -2.9884]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-5-g3-idtype1] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.7389, -2.9631, -0.3714, 0.3155, -2.1281], [ 0.9205, 3.3359, -1.3430, -0.4578, 0.0119], ...834, -0.7278, 1.7913, -0.0206, -0.1821], [ 0.5815, -0.3880, -2.6453, -0.3300, -1.9916]], grad_fn=) rhs_data = tensor([[-0.2530, -1.9193, 1.0921, 1.6466, 2.9917], [ 0.0329, 2.8371, 0.4122, -1.7736, -2.8467], ...098, 0.7569, -0.9536, 0.0831, -3.9703], [ 2.0233, -0.0954, 0.8983, 1.0181, 0.0321]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-5-g4-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.7091, -1.9608, -1.0627, 1.5727, -0.7033], [ 0.2077, -0.1939, -1.2146, -2.7953, -1.4495], ...293, 0.0235, 0.4523, 0.2061, -0.1833], [-0.6810, -0.5318, -0.5044, -1.6564, -0.3337]], grad_fn=) rhs_data = tensor([[ 0.8952, 1.5233, 2.0281, -3.0786, 0.1367], [ 1.5246, 1.3504, -1.0947, -0.0165, 1.2030], ...460, -0.8525, 0.7980, -0.6268, 0.0291], [-0.6504, 0.5439, 0.0781, 1.1774, -1.1338]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-5-g4-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.8086, 0.9245, 0.5835, 0.9028, 0.9542], [ 0.8189, -0.3348, -1.7175, 0.9434, -0.1541], ...599, 1.1003, -0.9780, 1.2732, -0.1804], [-3.2987, 1.4191, 0.4780, 0.7067, -0.4373]], grad_fn=) rhs_data = tensor([[-0.7585, 0.5401, -1.2831, -1.1542, -0.6545], [-0.0938, -1.0621, -0.6654, 0.6071, 0.2818], ...408, -0.5371, -0.9386, -0.3707, -0.4968], [-0.0840, -0.1149, -0.2563, -0.3345, -1.7704]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-5-g5-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 5.2904, -5.6345, 2.3054, -1.2926, 0.2312], [-2.3719, -2.9285, -1.6052, 2.7780, -2.6947], ...764, -6.8275, -0.1479, -1.6348, -0.2559], [ 0.0123, 1.3724, -0.2375, -0.2877, 0.0654]], grad_fn=) rhs_data = tensor([[ 1.8065, -1.0380, -2.2696, 1.5252, -0.7376], [ 1.8640, -2.1299, 3.9823, 1.5301, 0.6907], ...146, 1.7678, -3.5091, 0.7423, -3.0150], [-0.0657, -0.4329, 1.2101, -0.7717, 0.6540]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[1-5-5-g5-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(5,), dtype=torch.float32), 'f_nj': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.7367, 2.2677, 2.3784, -2.1027, -0.2515], [-1.5536, 0.5589, -1.2308, -0.6574, -1.0994], ...716, 1.8955, -1.0991, 2.4814, 1.3006], [ 3.5422, -0.1395, 1.7210, -2.8993, -3.5538]], grad_fn=) rhs_data = tensor([[-0.4696, -0.7178, -0.1673, 1.2336, 1.8181], [-0.4927, 1.5876, -0.9288, -0.8321, 0.7550], ...456, -0.9458, 0.9793, -2.3248, -0.0052], [-2.2309, 3.6036, -1.1320, 2.7364, 1.4736]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.4705, 5.6044, 3.2821, 0.4232], [ 1.0011, -0.2358, 2.1416, -1.0516], [-2.2868, 2.6409,...17], [-0.1693, 2.1667, 1.2084, 1.1679], [-1.9563, 2.1514, 1.5423, 0.6475]], grad_fn=) rhs_data = tensor([[-1.7161, -0.1187, -3.8107, 4.7277], [-1.8614, -0.1279, -0.9785, -0.0991], [ 0.6916, -0.6875,...66], [-0.4105, 1.0508, -1.4779, 2.9405], [ 1.5260, -1.5907, -2.8946, 1.4005]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.4309, -1.6022, 1.2219, -0.5968], [ 0.7681, 1.4112, -1.5525, 1.1722], [ 1.7247, 2.5229,...18], [-1.6049, -3.0740, 1.7911, -1.2084], [-0.9855, -1.2082, -0.1131, -0.2469]], grad_fn=) rhs_data = tensor([[-0.4095, -1.5472, -1.2292, 0.4058], [ 0.0172, 0.8372, 1.7636, -1.2318], [-1.0580, -3.2101,...97], [ 1.7067, 3.8112, -0.2493, 0.2017], [ 1.4787, 2.0141, -0.9155, 0.9174]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g1-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 2.1911, -0.4616, 1.2143, 1.1113], [ 4.9592, -1.4097, -1.1115, -2.7368], [-0.7983, -0.7609,...22], [-1.7657, 1.0233, -0.0615, 0.9180], [ 0.4996, 0.9434, 1.0623, 0.2781]], grad_fn=) rhs_data = tensor([[ 0.0516, 3.8297, 1.5290, -1.2061], [-1.1254, -2.4673, -4.0393, 0.9669], [ 0.3320, 1.3885,...78], [-0.3420, -2.2822, -0.8262, 0.5787], [ 0.8054, 0.0079, 1.7885, -0.7706]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g1-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.0770, -0.0182, 2.7876, -0.2004], [-2.5724, 1.4596, -1.5684, -0.3723], [-0.9130, 0.3625,...87], [ 1.5599, 1.8954, 0.2003, 0.8250], [-1.8670, 1.3862, -0.1251, 0.5208]], grad_fn=) rhs_data = tensor([[-0.5586, 1.2995, 0.4686, -3.2376], [ 0.8194, 3.0440, -1.2207, 2.0622], [-0.4392, 4.5676,...44], [ 1.1046, -1.5310, 2.4979, -0.2353], [ 1.2784, 1.2943, 2.8227, -1.5528]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g2-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.2541, -0.7663, 1.9420, 0.0404], [-0.5131, -1.1529, -1.1021, 0.5091], [-0.1418, -1.5617,...32], [-0.9856, 1.2223, -1.0214, 2.2359], [ 0.3141, 1.1446, -0.0780, 0.0148]], grad_fn=) rhs_data = tensor([[ 1.4962, 0.3621, -0.1691, -0.1641], [ 3.4366, -0.4130, 0.2263, -2.2695], [-1.6926, 0.3727,...44], [-1.9140, -0.4491, -2.2426, -1.2523], [ 1.4527, -0.3189, 2.7364, -0.2511]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g2-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-2.6586, -2.5621, -1.5969, -0.3606], [-4.0109, -5.7380, -0.9841, -0.5199], [-4.1142, -2.5933,...14], [ 0.3800, 1.0556, -0.4602, 0.7014], [ 1.2257, 1.0313, 1.9744, 1.8609]], grad_fn=) rhs_data = tensor([[-1.6076, -2.1986, 0.7022, 0.9287], [-3.0533, 1.2627, -0.6349, 0.3400], [ 0.6336, -3.4636,...36], [ 0.9242, 0.9622, 0.7106, 1.0568], [ 0.7987, 1.0452, 1.1953, -0.9553]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g3-idtype0] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.1330, -4.3771, -3.4253, -1.9938], [-1.6464, -0.9109, -1.1013, 0.1960], [-0.1609, -0.7915,...71], [ 1.6249, 0.4494, 1.2005, 0.2167], [-0.3224, 0.5720, -2.3798, 1.1342]], grad_fn=) rhs_data = tensor([[ 3.3954, -2.6274, -1.4209, -1.3720], [ 0.4453, -1.5269, -3.4375, 1.8051], [-1.5277, -0.3332,...76], [-0.3833, 1.6089, 1.4920, 0.2292], [ 0.5944, 0.3316, -0.2435, -1.0885]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g3-idtype1] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.0806, 1.8655, -0.5368, -0.1033], [-2.4902, -0.5509, -1.6514, 1.1719], [-3.8806, 0.9989,...04], [ 2.7465, 0.0502, -0.6588, -0.1011], [ 1.5691, 0.1306, -0.5738, -0.9420]], grad_fn=) rhs_data = tensor([[ 0.3695, 1.4046, -0.9196, -1.6754], [ 3.1955, 0.2724, 1.1049, 0.5488], [-0.2718, 2.7186,...93], [ 0.2168, -1.1918, -0.9921, -1.2101], [ 0.6106, 0.1939, 0.1893, -1.2250]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g4-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.9970, 1.5860, -0.9695, 1.7324], [ 1.3100, 0.6790, 0.1797, 2.1306], [-1.4844, -0.5437,...49], [-0.1265, 2.6317, 1.4199, -1.0537], [ 0.5047, 0.0384, 1.0749, -0.2805]], grad_fn=) rhs_data = tensor([[-3.1614e+00, -6.0420e+00, -2.4869e+00, 1.6781e+00], [-6.4173e-01, -8.4946e-01, -2.5066e-01, -1.7252e... 6.6390e-02, -2.4160e+00], [-1.7252e-01, -5.7819e-01, -1.6768e-01, -8.4636e-01]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g4-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.4317, 0.2051, -0.4386, 0.5630], [-2.1512, 0.0136, 1.2346, -1.2071], [-0.8090, 1.5302,...70], [-1.6954, -0.5430, 0.2314, -0.7219], [ 2.1417, -1.3347, -1.9166, 2.8811]], grad_fn=) rhs_data = tensor([[-1.9771, -0.9079, -2.0069, -0.5775], [-0.2323, 0.1495, -0.5343, 2.8827], [ 1.0017, 0.6641,...95], [-1.1003, 0.2216, 0.5521, -1.8042], [-1.0003, 1.2796, -0.3066, 0.0315]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g5-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.2188, 2.1927, 2.2811, -0.4799], [-1.0089, 0.4566, 0.3029, 0.1925], [ 2.0592, 1.8421,...56], [ 0.0860, 3.8168, 2.2639, 4.0036], [-0.2505, 2.5956, 0.8886, 1.4244]], grad_fn=) rhs_data = tensor([[ 0.7030, 0.1426, 0.0691, 3.0107], [ 1.9122, 0.6484, 0.2837, -0.9625], [ 0.6895, 1.2586,...01], [ 0.3855, -1.0402, 0.5591, 1.9251], [ 1.6369, 2.1130, -0.3646, -0.3312]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-1-g5-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.4427, -0.6951, 0.0676, 1.0345], [-2.9837, 0.5889, -0.0792, 0.0990], [-2.1386, -0.7936,...31], [ 0.3877, -0.9499, -0.7087, 1.9725], [ 1.2181, 1.5107, 0.1961, -1.7028]], grad_fn=) rhs_data = tensor([[-0.0164, 0.7725, 0.5298, -1.5721], [ 0.6917, -1.7708, 0.4378, 2.5899], [ 0.4570, -1.1392,...10], [ 0.1479, -0.0720, -1.2991, -0.2711], [-0.7566, 1.0521, 1.0432, -0.5895]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-3.1886, 1.3212, -1.8167, -2.0514], [ 1.3203, -0.6645, -0.1217, -0.2292], [-1.7978, -1.0207,...20], [-0.9057, 0.3921, -1.1115, -1.6404], [-0.1795, 0.6632, -0.4115, 0.7573]], grad_fn=) rhs_data = tensor([[ 0.7254, 0.1973, -0.0808, -0.5974], [ 0.2943, 1.9091, -2.0205, -0.4959], [-0.1741, 0.6722,...51], [ 0.8325, -0.9791, -0.4594, 1.4006], [-1.8936, 0.4258, 3.0047, -1.0940]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.2177, -1.2223, -0.8321, 0.4431], [-1.5519, -3.7386, -1.4848, -1.3837], [ 2.1457, -2.0602,...26], [ 2.9129, -3.4845, -0.0458, 2.3417], [-0.5298, 0.1458, -2.5092, -0.2638]], grad_fn=) rhs_data = tensor([[ 0.6079, -0.1443, -0.3838, -0.8270], [-1.0742, -1.3106, 0.1403, 2.6426], [ 0.6929, 0.9749,...66], [ 1.0807, -2.1269, -1.4642, 1.8988], [ 0.6468, 0.3654, 4.3858, 1.2625]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g1-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-2.3643, -2.3627, -1.5377, 2.2136], [ 1.4285, 2.6656, 0.8685, 1.0907], [ 3.4750, -1.9359,...11], [ 0.5168, -0.0379, 2.0932, -0.6070], [-1.7742, -0.7319, 1.0960, 0.1867]], grad_fn=) rhs_data = tensor([[ 4.5253, -1.6406, 0.9484, 0.4256], [ 1.2817, 0.3861, 1.9974, 2.8732], [-2.4887, 3.2689,...84], [-3.8410, -4.7006, 1.6751, 2.0618], [ 0.0761, -2.2310, -2.0439, 0.0618]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g1-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 2.9831e-01, 3.2656e+00, 3.9567e+00, -4.1908e+00], [-2.0610e+00, -1.1100e+00, -3.8260e+00, -2.7758e... -1.1880e+00, -1.8185e+00], [ 6.8576e-01, 2.8559e+00, -9.5661e-01, 2.4070e-01]], grad_fn=) rhs_data = tensor([[ 3.1088, -1.8868, 0.2667, -1.1148], [-1.7955, -2.4547, 4.9673, 4.8564], [ 0.6461, -2.8934,...74], [ 0.4428, -3.0348, 1.8993, 1.2420], [-0.5464, -3.7737, 0.0888, 0.9691]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g2-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.9866e+00, -1.4745e+00, -2.2471e+00, -1.9539e+00], [ 2.5841e+00, 7.2719e-01, -1.4797e+00, -1.8424e... -3.9597e-03, 1.1277e+00], [ 4.3326e-01, 8.3875e-01, -3.4387e-01, 9.6885e-01]], grad_fn=) rhs_data = tensor([[ 0.6348, -0.0901, -1.7309, 1.3762], [ 0.7000, 2.3851, -0.5260, -0.1101], [-0.2585, 1.2528,...64], [ 0.0194, 0.3597, -0.3978, -1.9673], [-0.5673, 0.9465, 0.0478, -1.2157]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g2-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..._nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.2525, -1.5866, 2.0505, 1.8830], [ 0.5264, 3.4699, -1.9177, -1.7508], [ 0.9481, 1.3392,...71], [-0.0924, -1.6354, 1.8022, -0.8079], [-1.0340, -1.4842, 0.4683, -2.7473]], grad_fn=) rhs_data = tensor([[-0.1480, 0.6609, 1.2678, 0.3669], [-2.5142, -1.2031, 1.2148, -1.1793], [ 0.4836, 0.8027,...57], [-0.3987, 0.8789, -0.6952, 0.2705], [-3.0286, -0.4141, 0.7886, -0.1568]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g3-idtype0] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.7737, 2.2102, -2.6027, 6.1020], [-0.5747, -0.7563, 4.2326, -3.1685], [ 0.5448, -3.2331,...53], [ 1.7322, 1.4712, -1.8540, 4.2923], [-0.1973, 3.3409, -1.5507, -0.7583]], grad_fn=) rhs_data = tensor([[ 0.4175, 1.1364, -4.1467, -1.5625], [ 0.3793, 0.8213, 3.7015, -1.6664], [-0.2226, -1.3215,...95], [-0.3825, 1.0516, -3.8249, -1.5104], [-1.0052, 0.1070, 1.3424, 1.2628]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g3-idtype1] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.2843, 0.3852, 0.1904, -1.4449], [ 2.8285, -0.4158, -2.3034, -0.3183], [-1.7569, -0.3601,...41], [-0.8128, -1.7875, 1.3410, 2.2222], [-4.4570, -1.5183, 1.3367, 2.5890]], grad_fn=) rhs_data = tensor([[ 0.3204, -0.3112, 0.2016, -0.3800], [ 0.0988, 1.4551, 0.2763, -1.8006], [-2.3225, -2.9573,...44], [-2.2619, -1.0183, -0.7870, 2.0058], [-0.1445, 1.1549, -1.7810, 0.2983]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g4-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.7403, -2.6611, 2.0726, -0.1620], [ 0.1937, -3.9163, 2.7279, -0.3411], [ 0.0943, 0.1539,...84], [-0.2559, -2.0848, 1.9536, -0.4397], [ 0.9811, -2.8739, -2.6099, -1.8018]], grad_fn=) rhs_data = tensor([[-1.9081, 4.1517, 2.4007, 3.4005], [-0.2704, 1.5704, -0.4303, -1.6884], [ 2.2988, 1.9768,...25], [-1.3410, 0.5842, -1.2046, -0.3630], [ 2.1658, 2.4787, 1.4649, -0.0976]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g4-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.1629, 0.0876, 0.9421, -1.8273], [ 1.1288, 0.0362, -0.6557, 1.4832], [-1.5502, -1.4937,...43], [ 3.5529, 2.4599, 0.6221, -1.9668], [ 0.3306, -0.3822, -0.6408, 0.7038]], grad_fn=) rhs_data = tensor([[ 1.9198, -1.0479, -0.1992, 0.8821], [-1.1449, -0.6629, -2.5579, -1.4012], [-1.4192, -0.3162,...03], [-0.5087, 2.1324, 0.7303, 4.4128], [ 0.3148, -1.7595, -1.7210, -1.4954]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g5-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.0631, 0.8938, 1.1200, 1.9751], [ 1.8232, 1.4729, 2.2568, -0.0371], [ 0.1926, 0.4803,...69], [-1.2921, -4.3989, -2.3871, 1.9124], [-0.8024, 1.5462, -0.3333, -1.3744]], grad_fn=) rhs_data = tensor([[-2.9503, 1.3268, -0.3853, -2.6746], [-0.1582, -0.2348, 1.0995, 0.3570], [-2.4720, 1.2746,...31], [ 0.6830, 3.4650, -3.8762, -0.8625], [-2.1446, 0.5766, -0.1155, -0.7594]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-1-5-g5-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(4,), dtype=torch.float32), 'f_nj': Scheme(shape=(4,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.0509, -0.5268, 0.0349, -0.6368], [ 0.8334, -0.3870, -1.3914, 2.7050], [-0.0502, 0.1761,...63], [ 0.8315, 0.5500, -0.2115, -0.5934], [ 0.5705, 0.8046, 0.5534, -0.3663]], grad_fn=) rhs_data = tensor([[ 0.5355, -1.1845, -2.1908, 0.4937], [ 0.0783, 1.6918, -1.5063, -1.1174], [ 0.1490, 1.2118,...79], [-0.7770, -0.1401, -2.1277, 0.7843], [ 1.7185, -2.3144, -0.0133, -0.5802]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-5-1-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..., 'f_nj': Scheme(shape=(20,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 2.3418e+00, -2.5311e+00, 2.4129e+00, -9.1697e-01, 2.8740e+00, 9.7840e-02, 8.7723e-01, -3.5627e-..., 1.4117e+00, -1.1561e-02, 1.6876e-01, 1.0952e+00, -1.6553e+00, 9.4926e-01]], grad_fn=) rhs_data = tensor([[ 1.9326e+00, 2.1042e-01, -4.2754e-01, -9.8492e-01, -2.8375e-01, 1.7402e-01, -1.4331e-01, 1.1612e+..., -1.6642e+00, 7.5962e-01, -8.4145e-01, 2.4269e-01, 3.2455e-01, -1.1221e+00]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-5-1-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh..., 'f_nj': Scheme(shape=(20,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.0858, -1.4119, 0.3398, 1.8595, -0.6600, -0.6716, 1.5300, 0.9475, 0.0602, -0.9475, 0.9366, -...1314, 0.0681, 0.3897, 1.3603, -0.0389, -0.3280, -0.0632, 2.1367, -0.2880, -0.9100]], grad_fn=) rhs_data = tensor([[ 5.1795e-02, -1.2399e+00, 6.2994e-01, -2.7987e+00, -1.4160e+00, 5.1281e-01, -4.0056e-01, 3.3425e-..., -2.7116e-01, -2.0198e+00, -2.5785e-01, 1.7359e+00, -2.9486e-01, -1.0544e+00]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-5-1-g1-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(20,), dtype=torch.float32), 'f_nj': Scheme(shape=(20,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.0592, -2.9019, -0.4141, 1.4615, 2.4652, -0.5613, 0.1637, -0.5940, 0.3767, 1.6885, 0.4685, -...8940, -0.2568, -2.3942, -1.1186, -0.8192, 2.5201, -0.5623, -0.2988, 0.6815, 0.5842]], grad_fn=) rhs_data = tensor([[ 0.8483, -0.9579, 0.1241, -0.6175, -0.8686, 1.9778, -3.0819, 0.9716, 1.8992, 0.4874, 0.8220, ...5765, 0.6131, -0.9150, 1.1984, -1.6667, 0.8757, 0.6425, 0.6694, -2.4310, 0.9502]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-5-1-g1-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'f_ni': Scheme(shape=(20,), dtype=torch.float32), 'f_nj': Scheme(shape=(20,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.1659, 1.3182, -1.1311, 0.7880, 0.1638, -1.6882, 1.0765, -0.2963, -1.1422, 0.0067, 0.3497, -...8238, -0.2561, -0.2937, -0.0645, -0.1885, -0.1951, -1.3943, 1.5823, 0.0948, 1.1425]], grad_fn=) rhs_data = tensor([[ 1.1902e+00, 1.2980e-01, 2.5972e-01, 2.5151e+00, 9.7190e-01, 1.3649e-01, -1.1707e+00, -5.0471e-..., 1.1449e+00, -2.9666e-01, -5.9871e-01, -1.4830e+00, 2.4847e-01, -1.5507e+00]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_egat_conv[4-5-1-g2-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'f_ni': Scheme(sh...nj': Scheme(shape=(20,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=10, in_edge_feats=5, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = F.randn((g.number_of_nodes(), 10)) efeat = F.randn((g.number_of_edges(), 5)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:564: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 2.3720, -1.8935, -0.4162, 0.5520, -1.4524, 0.1448, -0.4728, 0.4514, 1.7599, -0.8936, 0.0332, ...5884, -1.5337, 0.3644, -1.2351, 0.0875, 0.0422, -0.6128, 3.5685, -1.8449, 0.5394]], grad_fn=) rhs_data = tensor([[ 6.3970e-01, 1.6601e+00, -2.3311e+00, -2.0783e+00, -6.8123e-01, -4.8562e-01, -1.8884e+00, -2.5141e-..., 5.2333e-01, 9.9181e-01, -1.7940e+00, -7.5259e-01, 1.7487e+00, 1.8527e+00]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The remaining test_egat_conv parametrizations fail at the same point, each with
TypeError: empty_context() got an unexpected keyword argument 'enabled'
(python\dgl\backend\pytorch\sparse.py:731); the repeated tracebacks differ only
in the randomly sampled feature tensors:

FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-1-g2-idtype0]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-1-g2-idtype1]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-1-g3-idtype0]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-1-g3-idtype1]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-1-g4-idtype0]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-1-g4-idtype1]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-1-g5-idtype0]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-1-g5-idtype1]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g0-idtype0]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g0-idtype1]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g1-idtype0]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g1-idtype1]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g2-idtype0]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g2-idtype1]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g3-idtype0]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g3-idtype1]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g4-idtype0]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g4-idtype1]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g5-idtype0]
FAILED tests\pytorch\test_nn.py::test_egat_conv[4-5-5-g5-idtype1]
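Reading the bracketed IDs: pytest composes them from the stacked parametrize decorators with the bottom-most decorator's value first, so test_egat_conv[4-5-1-g1-idtype1] means num_heads=4, out_edge_feats=5, out_node_feats=1, graph case g1, and the second idtype (torch.int64), matching the locals printed in the traceback above. A toy illustration (a hypothetical test, not part of this suite):

    import pytest

    @pytest.mark.parametrize('idtype', ['idtype0', 'idtype1'])  # topmost: appears last in the ID
    @pytest.mark.parametrize('num_heads', [1, 4])               # bottom-most: appears first
    def test_toy(idtype, num_heads):
        assert num_heads in (1, 4)

    # Collected as: test_toy[1-idtype0], test_toy[1-idtype1],
    #               test_toy[4-idtype0], test_toy[4-idtype1]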
_____________________ test_egat_conv_bi[1-1-1-g0-idtype0] _____________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8},
          metagraph=[('_U', '_V', '_E')])
idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_node_feats', [1, 5])
    @pytest.mark.parametrize('out_edge_feats', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        egat = nn.EGATConv(in_node_feats=(10,15),
                           in_edge_feats=7,
                           out_node_feats=out_node_feats,
                           out_edge_feats=out_edge_feats,
                           num_heads=num_heads)
        nfeat = (F.randn((g.number_of_src_nodes(), 10)),
                 F.randn((g.number_of_dst_nodes(), 15)))
        efeat = F.randn((g.number_of_edges(), 7))
        egat = egat.to(ctx)
>       h, f = egat(g, nfeat, efeat)

tests\pytorch\test_nn.py:589:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\egatconv.py:210: in forward
    graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <...>, op = 'add'
lhs_data = tensor([[ 2.8240],
                   [-2.1112]], grad_fn=<...>)
rhs_data = tensor([[-0.7571],
                   [ 0.5796],
                   [-0.9385],
                   [-2.8028]], grad_fn=<...>)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
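The locals above make the lowering concrete: fn.u_add_v('f_ni', 'f_nj', 'f_tmp') reaches gsddmm with op='add', lhs_data holding one row per source node (2 here) and rhs_data one row per destination node (4 here), and the output holding one row per edge. A dense reference of those semantics in plain PyTorch (a sketch, not DGL's kernel):

    import torch

    def u_add_v_reference(src, dst, lhs_data, rhs_data):
        # gsddmm(op='add', lhs_target='u', rhs_target='v'): for each edge
        # (src[i], dst[i]) the output row is lhs_data[src[i]] + rhs_data[dst[i]].
        return lhs_data[src] + rhs_data[dst]

    # Toy bipartite graph: 2 source nodes, 4 destination nodes, 3 edges.
    src = torch.tensor([0, 1, 1])
    dst = torch.tensor([0, 2, 3])
    lhs = torch.randn(2, 1)   # per-source-node features ('u' side)
    rhs = torch.randn(4, 1)   # per-destination-node features ('v' side)
    out = u_add_v_reference(src, dst, lhs, rhs)
    print(out.shape)          # torch.Size([3, 1]); one row per edge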
The subsequent test_egat_conv_bi parametrizations fail identically
(TypeError: empty_context() got an unexpected keyword argument 'enabled' at
python\dgl\backend\pytorch\sparse.py:731):

FAILED tests\pytorch\test_nn.py::test_egat_conv_bi[1-1-1-g0-idtype1]
FAILED tests\pytorch\test_nn.py::test_egat_conv_bi[1-1-1-g1-idtype0]
FAILED tests\pytorch\test_nn.py::test_egat_conv_bi[1-1-1-g1-idtype1]
FAILED tests\pytorch\test_nn.py::test_egat_conv_bi[1-1-5-g0-idtype0]

_____________________ test_egat_conv_bi[1-1-5-g0-idtype1] _____________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8},
          metagraph=[('_U', '_V', '_E')])
idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_node_feats', [1, 5])
    @pytest.mark.parametrize('out_edge_feats', [1, 5])
    @pytest.mark.parametrize('num_heads', [1, 4])
    def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        egat = nn.EGATConv(in_node_feats=(10,15),
                           in_edge_feats=7,
                           out_node_feats=out_node_feats,
                           out_edge_feats=out_edge_feats,
                           num_heads=num_heads)
        nfeat = (F.randn((g.number_of_src_nodes(), 10)),
                 F.randn((g.number_of_dst_nodes(), 15)))
        efeat = F.randn((g.number_of_edges(), 7))
        egat = egat.to(ctx)
>       h, f = egat(g, nfeat, efeat)

tests\pytorch\test_nn.py:589:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\egatconv.py:210: in forward
    graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <...>, op = 'add'
lhs_data = tensor([[-1.2543],
                   [ 0.5183]], grad_fn=<...>)
rhs_data = tensor([[-1.5706],
                   [-1.0798],
                   [ 0.0327],
                   [-2.8392]], grad_fn=<...>)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[1-1-5-g1-idtype0] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.4686], [ 1.9217], [-2.6619], [-1.0409], [ 0.2924], [-2.1099]], grad_fn=) rhs_data = tensor([[3.0384], [2.9239], [1.5217]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[1-1-5-g1-idtype1] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_node_feats = 5, out_edge_feats = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.1670], [ 0.2266], [ 1.2627], [ 0.7198], [-0.1601], [-1.8073]], grad_fn=) rhs_data = tensor([[ 2.8252], [-1.1598], [-3.1703]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[1-5-1-g0-idtype0] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.3146, 2.0922, -1.2694, -0.0680, -0.7776], [-1.0541, -0.4320, -1.4617, -2.2023, 0.1421]], grad_fn=) rhs_data = tensor([[-1.0656, -0.0955, -0.4764, 1.2581, 1.5510], [-1.8763, -1.9493, 0.3834, -0.4012, 1.2087], ...614, 0.2427, 0.1690, 0.7810, 0.6016], [-3.1626, -2.6896, 1.4366, 1.1721, -0.5961]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[1-5-1-g0-idtype1] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.2371, 0.2344, 3.8942, -1.2320, 1.0402], [-1.5102, -0.1980, -2.0674, 1.0480, -1.8496]], grad_fn=) rhs_data = tensor([[ 2.2017e-01, 3.4319e-01, 2.4685e+00, -2.3145e+00, 3.5856e+00], [ 8.9920e-02, 1.2209e+00, 3.5252e... -2.1515e+00], [-1.2832e+00, -2.4450e+00, -1.0793e+00, 1.7236e+00, -1.9500e+00]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[1-5-1-g1-idtype0] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.0916, -1.4950, -0.7666, -0.4751, 0.5945], [-0.3357, 0.3355, 0.2148, 2.9543, 0.0321], ...161, 2.2725, -2.6201, -0.4652, -1.8011], [-0.3900, -1.0558, 0.7978, 0.7933, 5.4721]], grad_fn=) rhs_data = tensor([[-0.9215, 0.8658, -2.3865, 1.3820, -0.6142], [ 0.4072, 1.6119, 4.1919, -1.5234, 0.4493], [-0.8402, -0.2129, -4.0229, -3.5910, -0.1222]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[1-5-1-g1-idtype1] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_node_feats = 1, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.7888, -1.2142, 0.2092, -3.8654, 0.4653], [-0.5668, 0.8523, -1.1037, -0.9767, 2.9824], ...872, -0.8212, 0.1097, -1.7158, 1.7991], [-1.1656, 0.6755, 0.8520, -1.2901, -4.5112]], grad_fn=) rhs_data = tensor([[-0.2197, -1.6336, -3.5178, -0.2170, 1.6726], [ 0.2573, 4.4163, -1.4093, 1.1220, 1.7720], [-0.4822, 2.2817, 1.0312, -1.8688, 1.0071]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[1-5-5-g0-idtype0] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-2.2929, 1.3306, 1.2237, 2.6639, 3.3122], [-1.5867, 1.2147, 4.4359, -0.5106, -1.9450]], grad_fn=) rhs_data = tensor([[ 3.8562, -2.1296, 0.0457, 1.3962, -0.3416], [-3.0199, 0.2560, 0.9294, -3.2362, -0.7833], ...829, -0.0402, -2.3186, 1.4928, -0.4964], [-0.2262, 3.9483, -0.3972, -2.3051, 0.6604]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[1-5-5-g0-idtype1] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.6821, 1.9085, -1.0450, 0.9774, -2.2073], [ 1.6780, -1.2726, -2.6771, -1.5571, 0.6557]], grad_fn=) rhs_data = tensor([[-2.1100, -1.8739, -1.0670, 1.2892, -2.3772], [-0.1286, -0.0339, -1.1950, -0.3152, 0.2632], ...022, 0.6798, 2.1355, -0.6326, -1.3051], [-1.6236, -2.0882, -0.6534, -2.7557, -0.2837]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[1-5-5-g1-idtype0] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.9805, 1.0242, -0.3608, 0.6343, 0.9700], [-0.9685, -0.9643, -2.3260, -0.6701, 0.8226], ...471, -0.4268, 0.4982, 3.3154, -0.1516], [-0.6576, -0.4776, 0.7304, -2.0443, -1.5253]], grad_fn=) rhs_data = tensor([[-0.8454, -0.9631, -0.3690, -0.6729, -0.2060], [-0.3451, -1.3987, 0.1778, -1.9388, 0.5186], [ 0.2851, -0.9529, -1.6814, -0.5284, 1.5315]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[1-5-5-g1-idtype1] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_node_feats = 5, out_edge_feats = 5, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-2.0645, 0.8260, -1.3832, 1.7390, -0.2179], [ 1.5741, 1.8801, 1.8280, -2.6369, 2.7830], ...763, 0.5599, 1.3475, 2.1505, 1.6288], [ 0.4243, 2.7415, 1.0676, -0.2745, 1.3554]], grad_fn=) rhs_data = tensor([[-3.1130, -1.0786, -1.7205, 0.5425, -1.2976], [-1.4849, 1.0256, -2.4215, 3.2899, -2.9724], [-1.0300, 0.2692, -0.4148, 0.7324, -0.6202]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-1-1-g0-idtype0] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.1962, -1.7739, 0.8247, 0.0619], [-0.4716, -1.6367, -1.1099, -1.4762]], grad_fn=) rhs_data = tensor([[ 1.3422, -0.9957, 0.1040, -0.7622], [ 0.3082, -0.4750, 0.8199, 0.7947], [ 0.8280, -4.0873, -0.0076, -2.0162], [ 1.0438, 0.4035, -0.0360, 1.9571]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-1-1-g0-idtype1] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.8192, 0.2947, -0.2068, -1.5144], [-4.5291, 3.0066, -0.7048, -0.8271]], grad_fn=) rhs_data = tensor([[-2.6707, -2.7875, -3.1676, 3.0346], [ 0.5352, 2.5069, -0.3904, -2.1632], [-2.3969, 0.5511, -3.9606, 0.0391], [-0.8857, 1.0355, -0.9382, -1.2227]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-1-1-g1-idtype0] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.6757, -0.4633, 0.5540, 1.0501], [ 0.1781, -0.8037, 1.2925, -1.4955], [-1.4085, 0.3828,...84], [-0.1253, -0.5181, 1.7059, -2.7104], [-0.3109, -1.4386, 0.1302, 0.0192]], grad_fn=) rhs_data = tensor([[-0.2652, 0.7894, 0.1817, 0.4694], [-0.7414, -0.3457, -0.0176, 0.2824], [ 1.0822, 0.1103, 0.3402, -0.5809]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-1-1-g1-idtype1] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_node_feats = 1, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.4065, -0.7620, 1.0641, -2.9007], [ 2.0265, 1.9998, -1.1969, -0.8955], [-1.4233, -1.3512,...41], [ 1.3946, 1.1776, 0.3965, -0.1368], [ 1.5669, 1.9249, -0.7914, 2.0394]], grad_fn=) rhs_data = tensor([[ 2.8247, -2.0020, -0.0164, 1.3055], [-1.7450, 0.5076, -0.8277, 2.4379], [ 2.0831, 0.5127, -2.3217, -0.0316]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-1-5-g0-idtype0] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.2626, -1.4545, 1.2638, -0.8761], [ 2.0661, -1.7082, 1.0934, -1.4949]], grad_fn=) rhs_data = tensor([[-0.1110, 1.7318, -1.5468, -1.7342], [ 2.8438, 0.5388, -2.1282, 0.6741], [ 2.6437, -1.8629, 1.1669, -0.9728], [ 1.0125, 0.9813, 0.3377, 1.0628]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-1-5-g0-idtype1] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.6508, 3.3676, -1.8081, 2.1275], [ 0.0592, 0.3606, 0.5148, -0.1441]], grad_fn=) rhs_data = tensor([[-0.6177, 0.2453, -2.3402, -1.2833], [-2.9738, -0.0109, -1.9707, 0.7520], [-0.5154, 1.0631, -1.7359, -0.3899], [ 2.2389, 0.5466, 1.6230, 0.8275]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-1-5-g1-idtype0] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.0990, 1.2184, 0.4267, -0.5217], [-2.5857, -0.6303, -1.6209, -0.2791], [ 0.4903, 3.1664,...10], [-0.0808, 0.9886, -0.2650, 1.4255], [-2.0096, -2.1572, -0.5341, -0.2613]], grad_fn=) rhs_data = tensor([[ 1.8884, -1.6914, -1.1863, -2.7802], [-0.4494, 1.7968, 0.2263, 2.5877], [ 1.3491, -0.7064, -2.3377, 0.4425]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-1-5-g1-idtype1] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_node_feats = 5, out_edge_feats = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.2024, -0.1988, 0.8150, -3.4943], [ 1.4453, 0.7416, -1.7601, 1.6785], [-1.1386, -1.1007,...94], [-0.0228, -1.1398, -2.9624, -4.8255], [ 0.3311, -0.8166, -4.0241, -2.7917]], grad_fn=) rhs_data = tensor([[-1.3706, 0.2851, -2.7871, 0.5717], [-2.8678, -3.6779, -0.0455, -0.5709], [-1.8704, -1.1826, -2.3336, 1.5540]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-5-1-g0-idtype0] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_node_feats = 1, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.4097, 1.8984, 0.3551, -0.4480, -0.0687, 0.4001, -0.0705, -1.3342, 0.1790, -0.2992, -0.6846, ...1696, -0.0989, 0.0766, 1.0797, 0.3481, 0.4213, -0.5149, 0.2680, -1.7514, -0.5989]], grad_fn=) rhs_data = tensor([[ 0.4364, 1.4956, -0.8586, 0.2187, 0.8939, 0.8513, 1.2006, -0.6805, -0.7382, -0.0431, -0.0510, ...4594, 2.6749, 0.4502, 0.0860, -1.8500, -1.4749, -0.3614, 0.0621, -1.3884, 1.5933]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-5-1-g0-idtype1] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_node_feats = 1, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.3011, -0.5376, 0.3123, -0.4423, -0.0936, 0.1586, -0.1759, -0.1315, -0.3151, 0.2314, 0.0671, ...9736, 0.5245, 0.7731, -1.3618, -0.5584, 0.5466, -0.9599, -2.0818, -0.2300, 0.5293]], grad_fn=) rhs_data = tensor([[ 3.2553, 2.3239, -0.1054, 0.6926, -0.1579, -2.2509, -0.1216, 0.8011, 0.3188, 1.1837, 0.5312, ...7275, -0.1071, -1.4916, -0.7246, 0.7349, -0.9231, 0.1787, 0.0780, -0.7637, 1.1226]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-5-1-g1-idtype0] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_node_feats = 1, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 2.2073e+00, -2.6027e-04, 3.4743e+00, 2.7274e+00, -8.1739e-01, 1.3504e+00, -7.8289e-01, 6.1971e-..., -1.0887e+00, -9.9952e-01, 1.9203e-01, -2.6045e-01, 4.8717e-01, -6.8592e-01]], grad_fn=) rhs_data = tensor([[-0.5644, 0.0285, 0.2939, -0.3236, -0.6025, -2.1658, 0.2563, -1.6114, -0.0392, 1.7581, -1.0836, -...7246, 0.5448, -0.1695, -3.4277, -1.2917, 1.1475, -0.9609, 1.9271, -3.8454, 0.4098]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-5-1-g1-idtype1] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_node_feats = 1, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.6426, 0.8838, 0.3667, 1.3293, -0.6857, -0.1028, 0.7211, -2.6559, -0.3012, 1.0810, 0.7554, ...0170, -1.0157, 0.9363, -0.9162, 1.8780, -0.8444, 1.3314, -0.1458, -0.4253, -0.5418]], grad_fn=) rhs_data = tensor([[-1.5545, 0.8220, 1.4938, 0.5802, -0.4761, 2.0218, 1.0873, 0.4447, 1.3501, 1.5914, -0.2872, -...5345, -0.3573, 0.3685, -0.4908, -1.6937, 0.3961, 2.6331, 0.3872, 0.1478, 2.9466]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
_____________________ test_egat_conv_bi[4-5-5-g0-idtype0] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_node_feats = 5, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.0046, 0.6153, 1.2086, 0.1239, 0.4620, -0.1714, -0.5258, 0.5707, -0.3771, 1.5341, 0.7290, ...0234, -0.2427, -0.6409, 0.5663, 0.2867, -0.4876, 0.4327, -0.9562, -0.8901, 1.4923]], grad_fn=) rhs_data = tensor([[-0.1430, 0.2631, 0.5120, 3.6576, -1.1857, 0.4625, 0.3265, -0.4424, 1.4593, -2.7570, 0.5860, ...0915, 0.6118, 0.0119, -3.1844, 0.8426, -0.5874, -0.2376, 1.3520, 1.4748, 0.6901]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-5-5-g0-idtype1] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_node_feats = 5, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.9737, 0.6102, 0.0785, -0.9298, 0.5088, -1.0626, 0.9676, -2.0778, 1.1726, 0.1344, -0.3150, -...8315, 1.5974, -1.6376, -0.7398, 2.4316, -0.0376, 0.8019, -1.0263, 0.3901, 0.4874]], grad_fn=) rhs_data = tensor([[ 0.8386, -0.3807, -1.1705, 2.1955, 0.8331, -1.2222, 0.4905, 2.6220, -0.6031, -0.1808, 0.3843, ...2990, 1.7932, 2.6046, 0.7737, 0.1156, -0.7440, 0.3782, 1.7154, -0.0047, 2.8158]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-5-5-g1-idtype0] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_node_feats = 5, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.0281, 0.6139, -0.4069, -0.3891, -0.1169, -0.0984, -0.0637, -0.8928, -0.2248, -0.0232, -0.7644, ...0111, -0.6119, 1.9497, -1.5755, -0.7964, -1.7147, -1.3416, 0.9161, 0.2530, -1.7544]], grad_fn=) rhs_data = tensor([[ 0.0109, 0.4432, 0.2941, -0.1281, 0.1075, -1.5179, -2.7440, 2.7387, 0.7373, 0.0477, 2.1056, -...6200, 0.6960, -1.3931, 1.3815, 0.8730, -0.4332, 0.3742, -0.0612, -2.0617, -0.7192]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_egat_conv_bi[4-5-5-g1-idtype1] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_node_feats = 5, out_edge_feats = 5, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_node_feats', [1, 5]) @pytest.mark.parametrize('out_edge_feats', [1, 5]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() egat = nn.EGATConv(in_node_feats=(10,15), in_edge_feats=7, out_node_feats=out_node_feats, out_edge_feats=out_edge_feats, num_heads=num_heads) nfeat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), 15))) efeat = F.randn((g.number_of_edges(), 7)) egat = egat.to(ctx) > h, f = egat(g, nfeat, efeat) tests\pytorch\test_nn.py:589: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egatconv.py:210: in forward graph.apply_edges(fn.u_add_v('f_ni', 'f_nj', 'f_tmp')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.0389e+00, 7.1546e-02, -3.4941e-01, 7.5242e-02, 3.9990e+00, 1.3364e+00, 5.6844e-01, 9.8133e-..., 1.2899e+00, -2.0342e+00, 4.1273e-01, -1.7717e+00, -1.2472e+00, -1.5473e+00]], grad_fn=) rhs_data = tensor([[ 0.0752, 1.7975, -1.3407, 1.0803, -0.5635, -0.0950, 0.4054, 0.2015, -1.2371, -1.6147, -0.0560, -...0693, 0.2942, 0.6756, -1.7121, 0.9719, -1.2741, -0.2872, 0.6854, 1.4201, -2.9483]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _______________________ test_sage_conv[mean-g0-idtype0] _______________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.5167, -0.5155, -0.3048, 0.3555, -0.6499], [ 0.2540, -1.6776, 0.3690, -1.3596, -1.4630], ...0.1157], [-1.2600, 0.6454, 1.8325, 0.6069, -0.0750], [ 1.4862, -0.2292, -0.5262, 0.9044, 1.9944]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError
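From this point on the same TypeError repeats for `test_sage_conv`, now reached through the gspmm path (`update_all` with a `copy_lhs` message and a reduce op) rather than gsddmm, and raised at `sparse.py:720` instead of `:731`. Since every message-passing call funnels into the same guard, a minimal script should reproduce the failure on an affected install, independent of the test parameters. A sketch under that assumption; the graph and feature sizes are arbitrary:

    import torch
    import dgl
    from dgl.nn import SAGEConv

    g = dgl.graph(([0, 1, 2], [1, 2, 0]))   # tiny homogeneous graph
    feat = torch.randn(g.num_nodes(), 5)
    conv = SAGEConv(5, 10, 'mean')
    h = conv(g, feat)   # forward -> update_all -> gspmm -> autocast guard

On a healthy install this returns a (3, 10) tensor; on the environment logged here it raises the `empty_context()` TypeError seen below.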
_______________________ test_sage_conv[mean-g0-idtype1] _______________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.8975, 1.1802, -1.5554, -1.4592, 0.2069], [-0.0925, -0.4041, 1.8117, -1.1794, -0.4688], ...3.2642], [-0.5249, 0.0258, 0.3991, 1.9249, 0.0129], [-0.5083, 1.5056, -1.9640, -0.4229, 1.1801]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g1-idtype0] _______________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.6935, 2.1613, -0.1898, 1.0169, 0.2102], [ 0.1352, -0.1715, -0.4494, -0.3250, -0.1710], ...0.9318], [-0.9497, -0.0270, 2.3339, -0.0300, -0.2625], [-0.8269, -1.9289, 0.1916, 0.0808, -1.6697]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g1-idtype1] _______________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.4537, -0.1520, 1.4016, 0.8592, 2.5332], [ 0.7623, -0.2182, 0.5084, -0.0966, -0.0754], ...0.4105], [ 0.0675, 0.1108, -0.5819, -1.5778, -1.9720], [ 0.7386, -0.4546, -0.5872, -0.9520, 0.8888]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g2-idtype0] _______________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1992, -1.9114, 1.7328, -0.7454, 0.1148], [ 0.8614, 0.4872, 0.6834, 0.1425, 1.2337], ...1.1650], [ 0.2462, -0.1469, -0.1363, 1.0008, -0.4970], [-0.5467, -1.6064, -0.4651, 0.1313, -0.3886]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g2-idtype1] _______________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.6244, -2.6425, -0.5178, 0.3598, 0.0451], [ 3.6627, 0.0434, -0.4057, -0.4627, -0.5298], ...1.1128], [-0.6951, -0.1278, 0.2457, 0.1586, -0.2663], [ 0.4082, 0.1368, 0.5814, 0.9309, -1.6320]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g3-idtype0] _______________________ idtype = torch.int32 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.6399, -0.3381, 0.0730, -0.5250, -0.5501], [-0.7266, 0.5355, 0.6045, -0.3308, -1.6098], ...1.0480], [ 0.7506, -0.2638, -0.7180, 0.4151, -0.5750], [-0.3106, 0.5783, -0.5130, 0.8730, -0.0179]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g3-idtype1] _______________________ idtype = torch.int64 g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.5509, -0.2188, 0.2724, -0.0069, -1.8051], [ 0.6083, 2.4782, -0.4405, 0.4490, -0.2045], ...0.1644], [ 0.0149, 0.1271, -0.7126, 0.0644, 0.0457], [ 1.4510, 1.8559, -0.4459, -1.2784, -2.8244]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g4-idtype0] _______________________ idtype = torch.int32 g = Graph(num_nodes=5, num_edges=6, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.4847, -0.1821, -0.3754, -1.2280, -1.0295], [-1.6525, -0.0031, -0.7570, 0.4880, 0.0231], ...1.0345], [-0.4572, 1.5477, -0.8569, 1.0986, -0.8180], [-0.4444, -1.6869, 0.7497, 0.8708, -0.1252]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g4-idtype1] _______________________ idtype = torch.int64 g = Graph(num_nodes=5, num_edges=6, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0518, 0.4190, 0.0381, -0.7346, -0.8814], [ 0.2230, 0.3211, -0.1149, 2.4687, -1.0968], ...0.8669], [ 0.2742, 2.9805, 0.1265, -0.8976, 1.0096], [ 0.0063, -0.8539, -0.5710, 0.6938, 0.9824]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g5-idtype0] _______________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.2012, 0.9740, -0.4616, -1.9719, -0.9165], [-0.6598, 1.1900, -1.2940, -0.1118, -0.4931], ...0.4581], [-0.5044, -0.2535, 0.8249, 0.9311, -0.5560], [ 0.7562, 1.7565, 0.2017, 0.4127, -0.4059]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g5-idtype1] _______________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.3978, 1.6531, -0.0331, 1.1014, 0.3209], [-0.2445, -0.5199, 0.5895, 0.9006, -1.9214], ...0.8987], [ 0.2628, -1.0204, 1.7846, -0.3674, 0.4017], [ 0.2405, -0.4377, 1.4141, 1.7520, 0.2006]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g6-idtype0] _______________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.2969e-01, 5.8390e-01, -6.1020e-02, -1.4316e+00, -6.6701e-01], [ 9.9733e-01, 3.7042e-01, -9.6290e...01, -6.6794e-01, 1.6573e+00, -1.1488e+00], [-1.2569e+00, -7.2781e-01, -2.8847e-01, 1.5214e+00, -1.9161e+00]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g6-idtype1] _______________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.4544, -0.1587, -1.6297, -3.1991, -1.0804], [-0.5030, -2.0614, 0.4025, -0.1783, 0.3222], ...3.5838], [-0.8967, -1.3463, -0.9606, -0.5578, -0.3875], [-2.1614, 0.8547, -0.7490, -0.8992, 0.1229]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g7-idtype0] _______________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.1797, -0.1389, 0.3035, 0.2275, 1.2560], [ 0.4777, 1.0115, -0.6438, 0.4192, 0.3112], ...0.3222], [-0.9916, -0.0668, 0.5594, -0.5939, -0.8603], [ 0.3453, 0.4358, -1.0278, -0.4006, 1.7030]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[mean-g7-idtype1] _______________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.8330, -0.1078, -0.2348, 0.3756, 1.1148], [-1.2159, 0.2977, 1.3930, 0.0479, -0.1370], ...0.8305], [-0.0466, -0.4962, -0.2039, 0.5966, -1.9703], [-0.5651, -0.1076, 0.4802, 0.7503, 0.2763]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[pool-g0-idtype0] _______________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) aggre_type = 'pool' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:257: in forward graph.update_all(msg_fn, fn.max('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[0.1585, 0.0000, 0.0000, 0.0000, 0.5636], [0.6246, 0.0000, 0.0000, 0.0000, 1.0704], [0.0000, 0... [0.0000, 0.5154, 0.0000, 0.0000, 0.8562], [0.0000, 0.0000, 0.0000, 0.3472, 1.0348]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[pool-g0-idtype1] _______________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) aggre_type = 'pool' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:257: in forward graph.update_all(msg_fn, fn.max('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[0.0000, 2.7713, 0.0660, 1.2188, 0.4864], [0.0000, 0.2083, 0.0000, 1.1882, 0.2260], [0.0000, 1... [0.3085, 2.5072, 0.0000, 0.0000, 0.5998], [0.0000, 1.1852, 1.0088, 0.0000, 1.4331]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[pool-g1-idtype0] _______________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'pool' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:257: in forward graph.update_all(msg_fn, fn.max('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[0.0000, 0.0000, 2.3390, 1.0991, 1.8052], [0.0000, 2.0598, 3.0507, 3.1248, 3.3416], [0.0000, 0... [0.0000, 0.7962, 0.0000, 0.0635, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[pool-g1-idtype1] _______________________ idtype = torch.int64 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) aggre_type = 'pool' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:257: in forward graph.update_all(msg_fn, fn.max('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[0.0000, 1.4257, 3.4639, 0.0000, 1.7592], [0.6720, 2.0973, 0.5037, 0.0000, 0.0127], [0.0000, 0... [0.0000, 0.0000, 3.7724, 0.5622, 1.0016], [1.3971, 0.5749, 0.0000, 0.8470, 0.0000]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_______________________ test_sage_conv[pool-g2-idtype0] _______________________

idtype = torch.int32
g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
aggre_type = 'pool'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    def test_sage_conv(idtype, g, aggre_type):
        g = g.astype(idtype).to(F.ctx())
        sage = nn.SAGEConv(5, 10, aggre_type)
        feat = F.randn((g.number_of_src_nodes(), 5))
        sage = sage.to(F.ctx())
        # test pickle
        th.save(sage, tmp_buffer)
>       h = sage(g, feat)

tests\pytorch\test_nn.py:608:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:257: in forward
    graph.update_all(msg_fn, fn.max('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'max'
lhs_data = tensor([[0.4976, 1.0941, 1.6991, 0.8592, 0.1752],
        [1.0626, 0.0679, 1.2594, 0.5604, 0.0066],
        [0.1063, 0...
        [0.0000, 0.5206, 1.2513, 1.6043, 0.3325],
        [0.0000, 0.4688, 0.1870, 1.2528, 0.1846]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError

[... test_sage_conv[pool-g2-idtype1] through test_sage_conv[pool-g7-idtype1] fail
identically: every remaining 'pool' case over graph fixtures g2-g7 (including the
Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) case, g6) and both idtypes
raises the same TypeError at python\dgl\backend\pytorch\sparse.py:720; only the
test id and the random input tensors differ. Angle-bracketed reprs such as the
gidx object and grad_fn names were eaten by the log renderer throughout. ...]
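Every one of these failures is the same crash, raised before any graph computation
runs: gspmm wraps its backend call in `with autocast(enabled=False)`, but on this
Windows builder (Python 3.6 with an older PyTorch) the `autocast` name is bound to
a no-argument fallback context manager. A minimal sketch of that pattern, assuming
the fallback is a contextlib-style generator named empty_context as the error
message suggests (the actual DGL definition is not reproduced in this log):

    from contextlib import contextmanager

    @contextmanager
    def empty_context():
        # Fallback used when torch.cuda.amp.autocast is unavailable;
        # unlike the real autocast, it accepts no arguments.
        yield

    autocast = empty_context  # alias taken on older PyTorch builds

    try:
        with autocast(enabled=False):  # mirrors sparse.py:720 above
            pass
    except TypeError as err:
        print(err)  # empty_context() got an unexpected keyword argument 'enabled'

The TypeError comes from the call `autocast(enabled=False)` itself, which is why
every aggregator type and every graph fixture fails at the same point.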
[... all sixteen 'gcn' cases, test_sage_conv[gcn-g0-idtype0] through
test_sage_conv[gcn-g7-idtype1], fail with the same TypeError at
python\dgl\backend\pytorch\sparse.py:720. The only difference from the 'pool'
failures is the call site: sageconv.py:249 uses fn.sum('m', 'neigh'), so gspmm
is entered with reduce_op = 'sum' instead of 'max'. ...]
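One plausible shape for the fix is to make the fallback accept and ignore the same
arguments that torch.cuda.amp.autocast takes. The PR's actual diff is not part of
this log, and its commit message says only "fix for pytorch < 1.12", so treat this
as a sketch rather than the real change:

    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):
        # Swallow `enabled=...` and any other autocast-style arguments so the
        # no-op fallback stays call-compatible with torch.cuda.amp.autocast.
        yield

    with empty_context(enabled=False):  # no longer raises TypeError
        pass

An equivalent alternative would be to branch at the two call sites (sparse.py:720
and sparse.py:731) and call the fallback with no arguments when the real autocast
is unavailable.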
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_sage_conv[lstm-g0-idtype0] _______________________ idtype = torch.int32 g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) aggre_type = 'lstm' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) def test_sage_conv(idtype, g, aggre_type): g = g.astype(idtype).to(F.ctx()) sage = nn.SAGEConv(5, 10, aggre_type) feat = F.randn((g.number_of_src_nodes(), 5)) sage = sage.to(F.ctx()) # test pickle th.save(sage, tmp_buffer) > h = sage(g, feat) tests\pytorch\test_nn.py:608: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:261: in forward graph.update_all(msg_fn, self._lstm_reducer) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:362: in message_passing msgdata = invoke_gsddmm(g, mfunc) python\dgl\core.py:276: in invoke_gsddmm z = op(graph, x) python\dgl\ops\sddmm.py:164: in copy_u return gsddmm(g, 'copy_lhs', x, None) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[ 0.0544, 0.1762, -0.0123, -0.7046, 2.0941], [-0.8694, 0.4527, 0.5545, -0.6142, -0.4968], ...1.9667], [-0.0466, 0.7508, 0.6995, -0.3397, -0.5727], [ 1.8063, 0.8264, 0.2978, 0.0687, -0.2268]]) rhs_data = None, lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
[The identical failure repeats for the remaining test_sage_conv cases, all via
 the gsddmm path at python\dgl\backend\pytorch\sparse.py:731; tracebacks elided:
 lstm-g0-idtype1, lstm-g1-idtype0, lstm-g1-idtype1, lstm-g2-idtype0,
 lstm-g2-idtype1, lstm-g3-idtype0, lstm-g3-idtype1, lstm-g4-idtype0,
 lstm-g4-idtype1, lstm-g5-idtype0, lstm-g5-idtype1, lstm-g6-idtype0,
 lstm-g6-idtype1, lstm-g7-idtype0, lstm-g7-idtype1]
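One low-risk remedy is to leave the call sites unchanged and make the stand-in tolerate the keywords the real `torch.cuda.amp.autocast` accepts. A minimal sketch of one such fix (an illustration only, not necessarily the patch under test here):

from contextlib import contextmanager

try:
    from torch.cuda.amp import autocast
except ImportError:
    # Hypothetical keyword-tolerant fallback: swallows `enabled` (and any
    # future keywords) so `with autocast(enabled=False):` becomes a no-op
    # instead of a TypeError.
    @contextmanager
    def autocast(enabled=False, **kwargs):
        yield

with autocast(enabled=False):
    pass   # runs whether or not the real autocast was importable

With a guard like that, the gspmm and gsddmm call sites at sparse.py:720 and :731 need no version checks of their own.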
[The identical failure repeats for the first test_sage_conv_bi cases, all via
 the gspmm path at python\dgl\backend\pytorch\sparse.py:720; tracebacks elided:
 1-mean-g0-idtype0, 1-mean-g0-idtype1, 1-mean-g1-idtype0, 1-mean-g1-idtype1,
 1-mean-g2-idtype0, 1-mean-g2-idtype1, 1-pool-g0-idtype0, 1-pool-g0-idtype1,
 1-pool-g1-idtype0, 1-pool-g1-idtype1, 1-pool-g2-idtype0, 1-pool-g2-idtype1,
 1-gcn-g0-idtype0]
_____________________ test_sage_conv_bi[1-gcn-g0-idtype1] _____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6},
      metagraph=[('_U', '_V', '_E')])
aggre_type = 'gcn', out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)),
                F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:249: in forward
    graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[0.6484],
        [0.3302],
        [1.3775],
        [0.8802]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_sage_conv_bi[1-gcn-g1-idtype0] _____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'gcn', out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.8833], [-1.6978]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_sage_conv_bi[1-gcn-g1-idtype1] _____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'gcn', out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.1861], [ 1.3745]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_sage_conv_bi[1-gcn-g2-idtype0] _____________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) aggre_type = 'gcn', out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0953], [-0.6711], [-2.9648], [ 1.3850], [-2.1642], [-0.4693]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_sage_conv_bi[1-gcn-g2-idtype1] _____________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) aggre_type = 'gcn', out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.7613], [ 0.5151], [-1.8637], [-0.6371], [-0.3214], [ 0.7414]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[1-lstm-g0-idtype0] _____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'lstm', out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:261: in forward graph.update_all(msg_fn, self._lstm_reducer) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:362: in message_passing msgdata = invoke_gsddmm(g, mfunc) python\dgl\core.py:276: in invoke_gsddmm z = op(graph, x) python\dgl\ops\sddmm.py:164: in copy_u return gsddmm(g, 'copy_lhs', x, None) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[-1.7643, -2.0578, 1.7569, 1.0039, 1.9650, 0.7118, 0.9555, -0.6192, -0.3978, -1.2367], [... 0.2715], [ 0.2582, 2.0767, -0.2051, 1.0329, 0.7733, -0.1276, 1.2515, -0.2024, -0.6516, 0.4189]]) rhs_data = None, lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ____________________ test_sage_conv_bi[1-lstm-g0-idtype1] _____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'lstm', out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:261: in forward graph.update_all(msg_fn, self._lstm_reducer) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:362: in message_passing msgdata = invoke_gsddmm(g, mfunc) python\dgl\core.py:276: in invoke_gsddmm z = op(graph, x) python\dgl\ops\sddmm.py:164: in copy_u return gsddmm(g, 'copy_lhs', x, None) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[ 4.6498e-01, 1.3706e+00, 2.2719e-01, 3.5126e-01, 8.7833e-01, -5.2887e-01, -4.3218e-01, -1.1734e-...-01, 2.7957e-01, 6.6686e-01, -6.2588e-02, 7.4312e-01, 8.7782e-01, 1.0510e+00, 3.7153e-01, 5.2126e-01]]) rhs_data = None, lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ____________________ test_sage_conv_bi[1-lstm-g1-idtype0] _____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'lstm', out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:261: in forward graph.update_all(msg_fn, self._lstm_reducer) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:362: in message_passing msgdata = invoke_gsddmm(g, mfunc) python\dgl\core.py:276: in invoke_gsddmm z = op(graph, x) python\dgl\ops\sddmm.py:164: in copy_u return gsddmm(g, 'copy_lhs', x, None) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[-0.1367, 0.5358, 2.4593, 0.8362, -1.9374, -0.2122, -0.0704, -0.1004, -1.3655, -0.0352], [ 0.1371, 0.2828, -0.2729, -0.1885, -0.8072, 0.4908, -0.4412, -0.5351, -0.4777, -1.2909]]) rhs_data = None, lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ____________________ test_sage_conv_bi[1-lstm-g1-idtype1] _____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'lstm', out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:261: in forward graph.update_all(msg_fn, self._lstm_reducer) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:362: in message_passing msgdata = invoke_gsddmm(g, mfunc) python\dgl\core.py:276: in invoke_gsddmm z = op(graph, x) python\dgl\ops\sddmm.py:164: in copy_u return gsddmm(g, 'copy_lhs', x, None) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[ 1.4891, -0.1086, -0.2258, -0.9871, 0.2672, -0.2113, 0.0343, -0.5318, -0.5578, 0.4856], [-1.2217, 0.6697, -0.4637, -0.1946, -0.7720, -1.7658, -0.2480, -1.0679, -0.6720, -0.7057]]) rhs_data = None, lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ____________________ test_sage_conv_bi[1-lstm-g2-idtype0] _____________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) aggre_type = 'lstm', out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:261: in forward graph.update_all(msg_fn, self._lstm_reducer) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:362: in message_passing msgdata = invoke_gsddmm(g, mfunc) python\dgl\core.py:276: in invoke_gsddmm z = op(graph, x) python\dgl\ops\sddmm.py:164: in copy_u return gsddmm(g, 'copy_lhs', x, None) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[-1.1263, -1.0317, 2.3468, 0.5330, -0.2204, 0.2433, -1.3930, 0.2225, -0.0258, -1.0189], [...-0.3886], [ 0.9357, -0.5037, -1.3425, 0.3275, -0.5899, -0.1657, 0.0109, 0.7961, -0.2692, 0.9374]]) rhs_data = None, lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ____________________ test_sage_conv_bi[1-lstm-g2-idtype1] _____________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) aggre_type = 'lstm', out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:261: in forward graph.update_all(msg_fn, self._lstm_reducer) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:362: in message_passing msgdata = invoke_gsddmm(g, mfunc) python\dgl\core.py:276: in invoke_gsddmm z = op(graph, x) python\dgl\ops\sddmm.py:164: in copy_u return gsddmm(g, 'copy_lhs', x, None) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[-0.3295, 0.6882, -0.3799, 0.5197, -0.3487, 1.9932, -0.4821, -0.8118, 1.5472, 1.6226], [...-0.6728], [ 0.2739, 1.2477, 0.9486, -0.0962, 0.2667, -0.5894, 0.7396, 0.3935, 0.8567, -1.1778]]) rhs_data = None, lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ____________________ test_sage_conv_bi[2-mean-g0-idtype0] _____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'mean', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0489, 0.5645], [-0.3063, -2.2330], [-1.8266, 1.3116], [-3.9712, -2.3290]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[2-mean-g0-idtype1] _____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'mean', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.9249, 4.9890], [ 2.5473, 0.1085], [-3.9742, 1.1358], [-3.0672, 1.4615]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[2-mean-g1-idtype0] _____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'mean', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.8889, 0.2545], [-0.4874, 0.8715]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[2-mean-g1-idtype1] _____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'mean', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5525, -1.4295], [-0.4075, 2.2417]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[2-mean-g2-idtype0] _____________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) aggre_type = 'mean', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.8514, -0.5656], [ 1.8181, -2.6620], [-3.0471, -2.4134], [-1.3824, -0.6803], [ 1.6069, -0.8870], [-0.8269, -3.1130]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[2-mean-g2-idtype1] _____________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) aggre_type = 'mean', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:235: in forward graph.update_all(msg_fn, fn.mean('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.1622, -0.7949], [-0.5807, 1.4902], [ 1.4357, 1.8260], [-1.1657, -0.6123], [-0.6318, 1.3783], [-1.6290, -0.4152]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[2-pool-g0-idtype0] _____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'pool', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:257: in forward graph.update_all(msg_fn, fn.max('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000, 0.2835, 2.0616, 0.0000, 0.9364, 0.0000, 0.0000], [0.0137, 1.... [0.0000, 0.5000, 0.0000, 0.0000, 0.0000, 0.0000, 3.8947, 0.0000, 1.3107, 0.0000]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[2-pool-g0-idtype1] _____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'pool', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:257: in forward graph.update_all(msg_fn, fn.max('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[0.0000, 0.0000, 0.1842, 0.0000, 0.0000, 0.0000, 0.0000, 3.4215, 2.3464, 0.0000], [0.0000, 0.... [0.0000, 0.0000, 0.1347, 0.0000, 0.0000, 0.0000, 0.0000, 3.2485, 3.9125, 0.0000]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[2-pool-g1-idtype0] _____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'pool', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:257: in forward graph.update_all(msg_fn, fn.max('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[1.2699, 1.1667, 2.4329, 2.2943, 0.0000, 0.0000, 0.0000, 0.7549, 0.2093, 0.0000], [1.8079, 0.0000, 0.0000, 1.2063, 1.3089, 0.0000, 0.0000, 0.7123, 0.5319, 0.9047]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[2-pool-g1-idtype1] _____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'pool', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:257: in forward graph.update_all(msg_fn, fn.max('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[0.0000, 0.2261, 0.0000, 0.0000, 0.0000, 1.9870, 0.0000, 0.0000, 0.5471, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 3.4257, 0.1079, 0.0000, 0.0000, 3.2329, 0.0000]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[2-pool-g2-idtype0] _____________________ idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) aggre_type = 'pool', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:257: in forward graph.update_all(msg_fn, fn.max('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[0.0000, 0.0000, 2.3856, 0.0000, 3.5579, 0.0747, 0.0000, 0.2106, 1.3117, 0.4992], [0.0000, 0.... [0.1936, 1.3845, 0.0000, 0.1639, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_sage_conv_bi[2-pool-g2-idtype1] _____________________ idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) aggre_type = 'pool', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:257: in forward graph.update_all(msg_fn, fn.max('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[0.0000, 0.0000, 1.6486, 0.8147, 0.0000, 0.0000, 0.8223, 0.0685, 1.3873, 0.2088], [0.0000, 2.... [0.7101, 0.0000, 1.3674, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_sage_conv_bi[2-gcn-g0-idtype0] _____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')]) aggre_type = 'gcn', out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'])) @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm']) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sage_conv_bi(idtype, g, aggre_type, out_dim): g = g.astype(idtype).to(F.ctx()) dst_dim = 5 if aggre_type != 'gcn' else 10 sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type) feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim))) sage = sage.to(F.ctx()) > h = sage(g, feat) tests\pytorch\test_nn.py:621: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0646, 0.8017], [ 0.3282, 0.2623], [-1.7307, 1.4024], [ 1.3608, -1.4565]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_sage_conv_bi[2-gcn-g0-idtype1] _____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')])
aggre_type = 'gcn', out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:249: in forward  graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 2.4131, -1.0959], [ 0.2039, -2.2723], [-3.7041, 2.3179], [ 1.1539, 0.8174]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_sage_conv_bi[2-gcn-g1-idtype0] _____________________

idtype = torch.int32
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
aggre_type = 'gcn', out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:249: in forward  graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[4.0023, 2.3261], [1.6924, 1.8011]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_sage_conv_bi[2-gcn-g1-idtype1] _____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
aggre_type = 'gcn', out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:249: in forward  graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.9533, -0.7720], [-2.8326, -1.3237]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_sage_conv_bi[2-gcn-g2-idtype0] _____________________

idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
aggre_type = 'gcn', out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:249: in forward  graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.7798, 2.0964], [-3.1119, 2.5519], [-1.0109, -1.0529], [-3.0597, 0.5489], [-1.7212, -2.1765], [ 0.3687, 0.0886]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_sage_conv_bi[2-gcn-g2-idtype1] _____________________

idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
aggre_type = 'gcn', out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:249: in forward  graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-3.9102, 0.3674], [ 0.0179, -0.5913], [ 4.9297, -0.6748], [-0.8237, 2.1899], [ 0.2711, 0.5452], [-4.4050, 2.0423]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
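Every failure in this block is the same crash: the backend wrapper in python\dgl\backend\pytorch\sparse.py enters `with autocast(enabled=False):`, but on this PyTorch build the `autocast` name resolves to a fallback called `empty_context` that takes no keyword arguments, so the TypeError fires before any graph kernel runs. A minimal sketch of a tolerant fallback follows; the names and the import guard are illustrative assumptions, not the repository's actual patch.

    # Sketch only: a fallback that tolerates autocast's keyword arguments.
    # The try/except guard and names are assumptions for illustration.
    import contextlib

    @contextlib.contextmanager
    def empty_context(*args, **kwargs):
        # Ignore arguments such as enabled=False so call sites written
        # against torch.cuda.amp.autocast(enabled=...) keep working.
        yield

    try:
        from torch.cuda.amp import autocast
    except ImportError:
        autocast = empty_context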
____________________ test_sage_conv_bi[2-lstm-g0-idtype0] _____________________

idtype = torch.int32
g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')])
aggre_type = 'lstm', out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:261: in forward  graph.update_all(msg_fn, self._lstm_reducer)
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:362: in message_passing  msgdata = invoke_gsddmm(g, mfunc)
python\dgl\core.py:276: in invoke_gsddmm  z = op(graph, x)
python\dgl\ops\sddmm.py:164: in copy_u  return gsddmm(g, 'copy_lhs', x, None)
python\dgl\ops\sddmm.py:75: in gsddmm  g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs'
lhs_data = tensor([[-0.8607, 0.3098, 0.2808, -0.1653, -0.1592, 0.0485, -0.5745, -0.5345, 1.8666, -1.4028], [... 0.1401], [ 0.2440, 0.5697, 1.3561, 1.3104, -0.9470, -1.6598, 0.3752, 0.2317, 0.1118, 0.7133]])
rhs_data = None, lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
____________________ test_sage_conv_bi[2-lstm-g0-idtype1] _____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')])
aggre_type = 'lstm', out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:261: in forward  graph.update_all(msg_fn, self._lstm_reducer)
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:362: in message_passing  msgdata = invoke_gsddmm(g, mfunc)
python\dgl\core.py:276: in invoke_gsddmm  z = op(graph, x)
python\dgl\ops\sddmm.py:164: in copy_u  return gsddmm(g, 'copy_lhs', x, None)
python\dgl\ops\sddmm.py:75: in gsddmm  g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs'
lhs_data = tensor([[ 0.3738, 0.6188, 1.1808, -0.2378, 0.4823, 0.5211, -1.4715, 0.7935, 0.7130, 0.1403], [...-1.3522], [ 0.3115, 0.4445, -0.1987, -1.0467, -2.6066, -1.7669, 0.4564, 1.2884, -0.3591, -1.5631]])
rhs_data = None, lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
____________________ test_sage_conv_bi[2-lstm-g1-idtype0] _____________________

idtype = torch.int32
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
aggre_type = 'lstm', out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:261: in forward  graph.update_all(msg_fn, self._lstm_reducer)
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:362: in message_passing  msgdata = invoke_gsddmm(g, mfunc)
python\dgl\core.py:276: in invoke_gsddmm  z = op(graph, x)
python\dgl\ops\sddmm.py:164: in copy_u  return gsddmm(g, 'copy_lhs', x, None)
python\dgl\ops\sddmm.py:75: in gsddmm  g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs'
lhs_data = tensor([[ 1.3395, -0.0973, 1.1509, 0.4748, -0.2030, 0.3357, -0.1795, -0.7663, -0.7051, -0.6345], [ 0.3183, 2.5592, 0.5120, 0.2009, 1.2238, 1.0704, 0.7836, 2.6911, 1.4359, -0.3083]])
rhs_data = None, lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
____________________ test_sage_conv_bi[2-lstm-g1-idtype1] _____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
aggre_type = 'lstm', out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:261: in forward  graph.update_all(msg_fn, self._lstm_reducer)
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:362: in message_passing  msgdata = invoke_gsddmm(g, mfunc)
python\dgl\core.py:276: in invoke_gsddmm  z = op(graph, x)
python\dgl\ops\sddmm.py:164: in copy_u  return gsddmm(g, 'copy_lhs', x, None)
python\dgl\ops\sddmm.py:75: in gsddmm  g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs'
lhs_data = tensor([[-0.4320, 1.2556, 0.8403, 0.0075, -0.4551, -1.0147, -1.4030, -0.1795, -0.6305, 1.2127], [ 1.3403, -0.9065, -0.0187, -1.0294, -1.0403, 1.7941, 0.2398, 1.3540, 0.0667, 1.2675]])
rhs_data = None, lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
____________________ test_sage_conv_bi[2-lstm-g2-idtype0] _____________________

idtype = torch.int32, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
aggre_type = 'lstm', out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:261: in forward  graph.update_all(msg_fn, self._lstm_reducer)
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:362: in message_passing  msgdata = invoke_gsddmm(g, mfunc)
python\dgl\core.py:276: in invoke_gsddmm  z = op(graph, x)
python\dgl\ops\sddmm.py:164: in copy_u  return gsddmm(g, 'copy_lhs', x, None)
python\dgl\ops\sddmm.py:75: in gsddmm  g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs'
lhs_data = tensor([[ 0.4768, 0.8830, -0.5621, 1.6969, 0.0225, -1.3958, -1.3762, -0.7441, -0.7116, 0.4947], [... 0.2954], [ 0.2342, 0.3976, -0.3529, 0.1594, -0.2597, -0.3330, -0.6725, -0.9375, 0.8374, -1.2583]])
rhs_data = None, lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
____________________ test_sage_conv_bi[2-lstm-g2-idtype1] _____________________

idtype = torch.int64, g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
aggre_type = 'lstm', out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite']))
    @pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn', 'lstm'])
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
        g = g.astype(idtype).to(F.ctx())
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
        feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
        sage = sage.to(F.ctx())
>       h = sage(g, feat)

tests\pytorch\test_nn.py:621:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:261: in forward  graph.update_all(msg_fn, self._lstm_reducer)
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:362: in message_passing  msgdata = invoke_gsddmm(g, mfunc)
python\dgl\core.py:276: in invoke_gsddmm  z = op(graph, x)
python\dgl\ops\sddmm.py:164: in copy_u  return gsddmm(g, 'copy_lhs', x, None)
python\dgl\ops\sddmm.py:75: in gsddmm  g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs'
lhs_data = tensor([[ 1.0471e+00, -1.2653e+00, -5.2789e-01, 5.7426e-02, -1.0818e+00, 4.4435e-01, 2.9689e-01, -7.2202e-...+00, -4.0678e-01, -1.2995e+00, -7.9521e-01, 7.2025e-01, -1.5633e+00, -1.6645e+00, -1.9601e+00, -1.4754e+00]])
rhs_data = None, lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
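The 'lstm' cases above die at sparse.py:731 through the SDDMM (copy_u) path rather than the SpMM path, but the mechanism is identical. The TypeError itself reproduces without DGL or a GPU; any no-keyword context manager fails the same way when called with autocast's signature (hypothetical standalone snippet, mirroring the helper name from the log):

    # Standalone reproduction of the mechanism (illustrative only).
    import contextlib

    @contextlib.contextmanager
    def empty_context():
        yield

    with empty_context(enabled=False):  # TypeError: empty_context() got an
        pass                            # unexpected keyword argument 'enabled'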
_________________________ test_sage_conv2[1-idtype0] __________________________

idtype = torch.int32, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv2(idtype, out_dim):
        # TODO: add test for blocks
        # Test the case for graphs without edges
        g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        sage = nn.SAGEConv((3, 3), out_dim, 'gcn')
        feat = (F.randn((5, 3)), F.randn((3, 3)))
        sage = sage.to(ctx)
>       h = sage(g, (F.copy_to(feat[0], F.ctx()), F.copy_to(feat[1], F.ctx())))

tests\pytorch\test_nn.py:636:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:249: in forward  graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.7503], [-0.3877], [ 1.7361], [-0.7325], [-0.4650]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sage_conv2[1-idtype1] __________________________

idtype = torch.int64, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv2(idtype, out_dim):
        # TODO: add test for blocks
        # Test the case for graphs without edges
        g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        sage = nn.SAGEConv((3, 3), out_dim, 'gcn')
        feat = (F.randn((5, 3)), F.randn((3, 3)))
        sage = sage.to(ctx)
>       h = sage(g, (F.copy_to(feat[0], F.ctx()), F.copy_to(feat[1], F.ctx())))

tests\pytorch\test_nn.py:636:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:249: in forward  graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.8477], [ 0.3394], [ 2.8783], [-1.5177], [-1.7766]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sage_conv2[2-idtype0] __________________________

idtype = torch.int32, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv2(idtype, out_dim):
        # TODO: add test for blocks
        # Test the case for graphs without edges
        g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        sage = nn.SAGEConv((3, 3), out_dim, 'gcn')
        feat = (F.randn((5, 3)), F.randn((3, 3)))
        sage = sage.to(ctx)
>       h = sage(g, (F.copy_to(feat[0], F.ctx()), F.copy_to(feat[1], F.ctx())))

tests\pytorch\test_nn.py:636:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:249: in forward  graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.4078, -1.2594], [-0.0893, -0.0618], [ 0.4615, -0.6097], [ 0.9645, 0.6322], [ 0.0364, -1.0941]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sage_conv2[2-idtype1] __________________________

idtype = torch.int64, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sage_conv2(idtype, out_dim):
        # TODO: add test for blocks
        # Test the case for graphs without edges
        g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        sage = nn.SAGEConv((3, 3), out_dim, 'gcn')
        feat = (F.randn((5, 3)), F.randn((3, 3)))
        sage = sage.to(ctx)
>       h = sage(g, (F.copy_to(feat[0], F.ctx()), F.copy_to(feat[1], F.ctx())))

tests\pytorch\test_nn.py:636:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:249: in forward  graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 4.0099, -4.2069], [ 3.6503, -0.0156], [-0.8242, -0.3775], [-0.6331, -3.0260], [-0.0086, -1.8557]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
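Note that test_sage_conv2 exercises graphs with no edges at all and still hits the same gspmm wrapper, so the crash happens before any edge data is touched: `_cast_if_autocast_enabled` returns, then the `with autocast(...)` line trips. A plausible shape for such a helper, assuming it follows the common torch.cuda.amp custom-op pattern of casting inputs once and running the kernel with autocast off, is sketched below; this is an assumption based only on the call sites in this log, not DGL's verbatim source.

    # Assumed shape of _cast_if_autocast_enabled; illustrative, not verbatim.
    import torch

    def _cast_if_autocast_enabled(*args):
        if not torch.is_autocast_enabled():
            return args
        dtype = torch.get_autocast_gpu_dtype()  # autocast's target dtype
        # Cast floating-point tensors once; pass everything else through.
        return tuple(a.to(dtype)
                     if torch.is_tensor(a) and a.is_floating_point()
                     else a
                     for a in args)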
_________________________ test_sgc_conv[1-g0-idtype0] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int32, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.1153e+00, 6.2224e-01, 7.5383e-01, -7.6151e-01, -1.6290e+00], [ 2.9963e-01, -9.1867e-01, -2.6921e...00, 1.2709e-01, 3.8796e-01, 2.5487e-01], [-1.1475e+00, -1.1530e+00, -8.7164e-01, -1.9866e-01, 1.6638e-01]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[1-g0-idtype1] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int64, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.0928, -0.3243, -1.0257, -0.0284, 0.2070], [ 1.2326, -0.3536, -0.2209, -0.4852, 0.1602], ...0.2397], [-0.8215, -1.0241, 0.1723, -0.0351, 0.6604], [-0.7579, 0.2230, -0.0809, -0.6123, 0.8807]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[1-g1-idtype0] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int32, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.2158, 0.4157, -0.2189, -0.5853, -1.0790], [-0.5006, 0.2078, -0.0058, 0.0240, 1.2029], ...0.2636], [-1.0410, -0.9868, -0.9353, 0.5991, 0.6517], [-0.7061, 0.8314, -0.0315, 0.7336, 0.0050]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[1-g1-idtype1] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int64, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.7625, -1.1514, -0.9397, 0.2973, -0.9160], [-0.2303, -0.2627, 0.3862, 0.4743, 0.5665], ...0.9913], [-0.1590, 0.6508, 0.4640, 0.3804, -0.4416], [ 1.1729, 1.4288, 0.4643, 0.3029, 0.1413]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[1-g2-idtype0] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.1421, -0.0257, -0.3185, -0.6716, -0.2598], [ 0.6564, -0.3693, -0.7975, 0.0162, 0.1493], ...0.2139], [-0.0872, -0.6665, 0.5276, -1.5325, 0.7190], [ 0.0667, -1.0048, -0.2295, -0.6781, -1.7817]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[1-g2-idtype1] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.1237, 1.1311, 0.7043, 1.1315, 0.3190], [ 0.0840, -0.8494, 0.6983, -0.8182, -0.4864], ...1.6545], [-1.3567, 0.4707, 0.8853, -1.4294, 0.1069], [-0.6326, -0.0563, 0.7955, 0.3189, -1.7650]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[1-g3-idtype0] _________________________

g = Graph(num_nodes=9, num_edges=15,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int32, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.5359, -0.7373, -0.1986, 0.3809, -1.0554], [-0.4912, 0.2131, 0.1595, -1.0143, 0.6709], ...0.6299], [-1.2511, -1.0104, 0.0441, 0.0149, 1.2652], [ 0.5878, 0.4335, -0.0434, -0.5360, -0.6394]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[1-g3-idtype1] _________________________

g = Graph(num_nodes=9, num_edges=15,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int64, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-6.9358e-01, -2.4879e-01, -2.4465e+00, 8.9066e-01, 4.7676e-01], [-1.7994e-01, 2.2614e-01, -7.1317e...02, 8.3151e-01, -4.9775e-01, 1.3894e-01], [ 1.6549e-02, 9.4562e-01, -1.2167e-01, -8.4982e-01, 1.4768e+00]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[1-g4-idtype0] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int32, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.4010, 0.1829, 0.1227, -0.0140, -0.6890], [ 0.3091, -0.2111, 0.0620, -0.2190, 0.8252], ...1.1525], [-1.5799, 0.3866, 0.0354, -0.9569, -0.4465], [-0.2135, -1.0207, 0.3849, -0.7626, -0.2160]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[1-g4-idtype1] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int64, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.4565, -0.1102, 0.7633, -0.4977, -0.7755], [-0.6357, 0.5309, 1.2306, 0.2407, 0.2489], ...1.3964], [-0.2957, 0.8520, -1.6005, 0.1575, 0.1486], [ 2.0756, 0.0430, -0.1943, -3.5298, -0.8414]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[1-g5-idtype0] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int32, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.9835, 1.1724, 0.4163, 0.7120, -0.6683], [ 0.7105, 0.3790, -0.0564, -1.0632, 0.6019], ...0.2274], [-1.0582, -0.6426, -1.3442, 1.1950, 0.0438], [ 1.1084, -0.7226, -2.8490, 0.4675, 0.7968]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[1-g5-idtype1] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int64, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.2725, -0.6251, -0.7310, 0.1805, -0.7205], [ 0.5960, 0.1132, 0.3324, -0.2837, 0.2707], ...0.0091], [-0.4190, 0.7394, -0.0046, -1.3882, -1.1518], [ 0.5905, -0.0313, 0.5909, 0.7899, 0.7637]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[2-g0-idtype0] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int32, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.4751, 0.5176, 1.0550, 0.7969, -0.4435], [ 0.0225, 0.5541, 0.1618, 0.5451, 0.2055], ...0.8868], [ 0.6143, 1.6895, -0.1782, -0.1688, -0.1521], [-0.2698, 1.2282, -0.2693, -1.5887, -0.0491]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[2-g0-idtype1] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int64, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.0581, -0.8305, -1.3066, 0.5630, -0.4314], [-0.0966, 0.1328, -0.1938, 0.4745, 0.1688], ...0.6270], [-1.2290, -0.2114, -0.4526, 1.0747, -1.3044], [ 2.3246, 0.0307, 1.4267, -1.9825, 1.0128]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[2-g1-idtype0] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int32, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.9076, 0.2035, 0.6050, 1.0509, -0.2774], [-0.1183, 0.5136, 1.0129, -0.0960, 0.4883], ...0.7544], [-0.7401, -0.3792, -0.4190, -1.4912, 0.8041], [ 0.6352, 1.4845, -0.1968, 0.3076, -0.8869]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_sgc_conv[2-g1-idtype1] _________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int64, out_dim = 2

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_sgc_conv(g, idtype, out_dim):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        # not cached
        sgc = nn.SGConv(5, out_dim, 3)
        # test pickle
        th.save(sgc, tmp_buffer)
        feat = F.randn((g.number_of_nodes(), 5))
        sgc = sgc.to(ctx)
>       h = sgc(g, feat)

tests\pytorch\test_nn.py:662:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl  return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:203: in forward  fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all  ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing  ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm  z = op(graph, x)
python\dgl\ops\spmm.py:189: in func  return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm  lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.9261, -0.8832, -0.6830, -0.6839, 0.1409], [ 0.4890, -0.1235, -0.2927, -0.0656, -1.3658], ...0.5201], [ 1.8555, 3.2268, -1.0963, 0.0656, -1.2751], [-0.3706, -0.4129, 0.1674, -0.4970, 1.3016]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_sgc_conv[2-g2-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sgc_conv(g, idtype, out_dim): ctx = F.ctx() g = g.astype(idtype).to(ctx) # not cached sgc = nn.SGConv(5, out_dim, 3) # test pickle th.save(sgc, tmp_buffer) feat = F.randn((g.number_of_nodes(), 5)) sgc = sgc.to(ctx) > h = sgc(g, feat) tests\pytorch\test_nn.py:662: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sgconv.py:203: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3531, 0.0983, -0.2150, 0.2558, -0.0818], [-0.1776, 0.1130, -0.1080, -0.0919, -0.0484], ...1.6309], [-1.1397, 1.6878, 0.4944, -0.4484, 0.8537], [-0.1935, 0.1629, -2.5101, 1.1200, 1.3222]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_sgc_conv[2-g2-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sgc_conv(g, idtype, out_dim): ctx = F.ctx() g = g.astype(idtype).to(ctx) # not cached sgc = nn.SGConv(5, out_dim, 3) # test pickle th.save(sgc, tmp_buffer) feat = F.randn((g.number_of_nodes(), 5)) sgc = sgc.to(ctx) > h = sgc(g, feat) tests\pytorch\test_nn.py:662: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sgconv.py:203: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3318, -0.9306, -1.0793, -0.3981, -0.8571], [-0.2308, -0.5392, -0.7182, 0.3514, -0.4415], ...0.6080], [ 0.0759, -0.1025, 0.1057, 0.2420, -0.1594], [-1.2331, -0.4894, -0.0777, 0.9134, 0.0118]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_sgc_conv[2-g3-idtype0] _________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sgc_conv(g, idtype, out_dim): ctx = F.ctx() g = g.astype(idtype).to(ctx) # not cached sgc = nn.SGConv(5, out_dim, 3) # test pickle th.save(sgc, tmp_buffer) feat = F.randn((g.number_of_nodes(), 5)) sgc = sgc.to(ctx) > h = sgc(g, feat) tests\pytorch\test_nn.py:662: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sgconv.py:203: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.3044e+00, -1.8580e+00, -3.3866e-01, -7.0749e-01, -1.9628e+00], [ 1.4719e+00, 9.3374e-01, -1.2402e...00, 5.2632e-01, -1.0365e+00, 5.8922e-01], [-6.0602e-01, 2.2662e-01, -5.4622e-01, 2.2425e-01, -5.6242e-01]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_sgc_conv[2-g3-idtype1] _________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sgc_conv(g, idtype, out_dim): ctx = F.ctx() g = g.astype(idtype).to(ctx) # not cached sgc = nn.SGConv(5, out_dim, 3) # test pickle th.save(sgc, tmp_buffer) feat = F.randn((g.number_of_nodes(), 5)) sgc = sgc.to(ctx) > h = sgc(g, feat) tests\pytorch\test_nn.py:662: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sgconv.py:203: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.5670, 1.3418, 0.0205, 0.0554, 1.2156], [-0.5563, 0.0522, -1.0762, 0.0051, -0.2892], ...1.1610], [ 0.1171, 0.5560, 0.6516, 0.3490, -1.0903], [-0.5307, -1.2562, 0.6655, -0.0182, 0.9046]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_sgc_conv[2-g4-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sgc_conv(g, idtype, out_dim): ctx = F.ctx() g = g.astype(idtype).to(ctx) # not cached sgc = nn.SGConv(5, out_dim, 3) # test pickle th.save(sgc, tmp_buffer) feat = F.randn((g.number_of_nodes(), 5)) sgc = sgc.to(ctx) > h = sgc(g, feat) tests\pytorch\test_nn.py:662: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sgconv.py:203: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.8036, -0.6416, 0.9105, 0.2474, 0.8440], [ 0.5363, -0.1496, -0.5685, 0.2297, -0.2812], ...0.5207], [-0.5266, 0.2349, 1.3866, 0.1286, -0.1605], [-1.4979, -0.2791, -1.1554, 0.5632, 0.6840]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_sgc_conv[2-g4-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sgc_conv(g, idtype, out_dim): ctx = F.ctx() g = g.astype(idtype).to(ctx) # not cached sgc = nn.SGConv(5, out_dim, 3) # test pickle th.save(sgc, tmp_buffer) feat = F.randn((g.number_of_nodes(), 5)) sgc = sgc.to(ctx) > h = sgc(g, feat) tests\pytorch\test_nn.py:662: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sgconv.py:203: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4208, -0.6790, -0.1098, 0.6655, 0.1232], [-0.3847, -0.6806, 0.1624, -0.9245, -0.5730], ...0.0143], [-1.4645, -0.8081, -2.3578, -1.1136, 0.6103], [-1.8674, 1.3260, 0.1762, 0.1219, -0.6558]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_sgc_conv[2-g5-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sgc_conv(g, idtype, out_dim): ctx = F.ctx() g = g.astype(idtype).to(ctx) # not cached sgc = nn.SGConv(5, out_dim, 3) # test pickle th.save(sgc, tmp_buffer) feat = F.randn((g.number_of_nodes(), 5)) sgc = sgc.to(ctx) > h = sgc(g, feat) tests\pytorch\test_nn.py:662: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sgconv.py:203: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2771, 0.8985, 1.1932, -0.1613, -0.3753], [-1.0308, -1.7162, -0.6974, -0.0261, 0.2392], ...0.6472], [ 0.1996, 0.3721, 0.4608, -1.5719, 2.0905], [ 0.5852, -1.3563, 0.8762, 1.9243, -0.0025]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_sgc_conv[2-g5-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_sgc_conv(g, idtype, out_dim): ctx = F.ctx() g = g.astype(idtype).to(ctx) # not cached sgc = nn.SGConv(5, out_dim, 3) # test pickle th.save(sgc, tmp_buffer) feat = F.randn((g.number_of_nodes(), 5)) sgc = sgc.to(ctx) > h = sgc(g, feat) tests\pytorch\test_nn.py:662: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sgconv.py:203: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.2411, -0.4401, -0.0199, 0.3107, 0.1241], [ 0.3906, 0.5180, 0.1621, -1.2393, -0.5666], ...0.4666], [-0.6296, 1.0790, -1.0514, -0.7966, -0.8324], [-0.4257, -0.0182, -1.7608, 1.9983, 0.4763]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g0-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.3022, 0.6082, 0.5632, -0.6902, 0.7791], [-0.4253, 0.2931, -0.1326, -0.7940, -0.2531], ...0.3433], [ 0.8229, 0.6415, -0.6227, -1.2144, 0.1676], [-0.6429, -0.3646, 0.4035, 0.0946, -0.7868]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g0-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-1.3359, -0.1955, -0.5525, 0.7520, 0.0383], [-0.3386, -0.2548, -1.0032, 1.5583, 0.3742], ...0.0520], [-0.1452, -0.5809, 0.3677, -1.6490, 0.6598], [-1.0950, 0.5411, 1.0554, -0.4085, -0.1354]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g1-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.2455, 0.3232, 0.3403, 0.5423, -0.2726], [-0.0069, 0.4237, -0.2699, -1.1848, -0.5432], ...0.4339], [-0.3943, -2.1058, 0.8110, 0.6022, -1.0830], [ 1.3945, -0.4357, -0.1637, -1.0308, 0.4060]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g1-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.7470, 0.5226, -0.2514, 0.3301, 0.3837], [-0.5294, 0.9001, 1.2214, 0.2357, -0.6622], ...1.1281], [ 0.6578, -0.2730, 0.4807, -2.0040, 1.6251], [ 1.9827, -0.4308, -0.4573, -0.6509, 0.2080]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g2-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.7430, -0.0635, -0.1236, -0.9118, -0.1409], [-0.2851, -0.4702, -0.3732, 0.3772, 0.1108], ...1.2701], [-0.7047, 0.6995, 0.7522, 0.9739, -0.0363], [ 0.6585, 0.2290, -1.5503, -0.5822, 0.0714]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g2-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.5062, 0.0161, -0.4136, 0.6292, -0.1412], [-0.2489, 0.4808, -0.6800, 0.6940, -0.0739], ...1.3135], [-0.4537, -0.9981, -0.2241, 0.9888, 1.2246], [-0.2838, -0.4010, 2.1741, -0.4732, -0.3427]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g3-idtype0] _________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-1.0299, 0.2926, -0.0097, -0.4929, 0.4381], [-0.1529, 0.2010, 0.7588, -1.0679, 0.1471], ...0.0425], [-0.7706, -1.2581, 0.5618, -1.7124, 0.4557], [-0.6841, 1.0255, 0.0610, -2.3333, -0.1727]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g3-idtype1] _________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.7933, 0.4328, -0.3607, 0.4800, 1.2979], [-0.7450, -0.7528, -0.6951, -0.0517, -1.4109], ...0.5642], [ 0.0882, 0.4170, 0.4050, -0.8288, -1.3514], [ 0.5588, 0.0540, -0.7828, -0.5787, 0.8467]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g4-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-4.9494e-01, 1.6106e-01, 6.0797e-01, 9.2947e-01, 2.6904e-02], [-6.8278e-01, 1.8360e-01, -3.9227e...01, 1.6273e+00, -7.2576e-01, 9.2706e-02], [-5.6638e-01, 1.3135e+00, -2.0288e-02, -4.4796e-01, 2.8753e+00]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g4-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.3172, 0.3583, -0.8368, -0.2492, -0.0303], [ 0.1314, -0.7922, -0.7403, 0.7373, 0.0136], ...0.7224], [-1.4246, 0.6161, -0.0501, 1.2807, 0.5537], [ 1.3446, -1.5174, -1.2142, 0.9258, 2.1938]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g5-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.4701, -0.2182, -0.0842, 0.1956, 0.6631], [-0.7034, 0.7063, 0.8337, -0.8359, 0.6053], ...0.0968], [-0.2572, 0.8915, 0.8062, -1.6188, 1.6586], [-2.3579, 0.6590, 0.2064, 2.4589, -0.6835]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_appnp_conv[g5-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) appnp = appnp.to(ctx) # test pickle th.save(appnp, tmp_buffer) > h = appnp(g, feat) tests\pytorch\test_nn.py:685: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:117: in forward fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.1347, 0.5045, 0.3906, 1.3026, -0.3070], [ 1.1250, 1.4391, -0.2138, 0.4424, 0.1221], ...1.1022], [ 0.5674, -0.8320, 1.0354, 1.3648, -1.5096], [ 0.9106, -0.6724, -0.5468, 0.7959, 0.1443]]) rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.], [1.]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_appnp_conv_e_weight[g0-idtype0] _____________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv_e_weight(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) appnp = appnp.to(ctx) > h = appnp(g, feat, edge_weight=eweight) tests\pytorch\test_nn.py:699: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:106: in forward 'both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_appnp_conv_e_weight[g0-idtype1] _____________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv_e_weight(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) appnp = appnp.to(ctx) > h = appnp(g, feat, edge_weight=eweight) tests\pytorch\test_nn.py:699: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:106: in forward 'both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_appnp_conv_e_weight[g1-idtype0] _____________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv_e_weight(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) appnp = appnp.to(ctx) > h = appnp(g, feat, edge_weight=eweight) tests\pytorch\test_nn.py:699: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:106: in forward 'both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_appnp_conv_e_weight[g1-idtype1] _____________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_appnp_conv_e_weight(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) appnp = nn.APPNPConv(10, 0.1) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) appnp = appnp.to(ctx) > h = appnp(g, feat, edge_weight=eweight) tests\pytorch\test_nn.py:699: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\appnpconv.py:106: in forward 'both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ____________________ test_appnp_conv_e_weight[g2-idtype0] _____________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch... 
____________________ test_appnp_conv_e_weight[g2-idtype0] _____________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...
edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32

>       h = appnp(g, feat, edge_weight=eweight)
tests\pytorch\test_nn.py:699:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_appnp_conv_e_weight[g2-idtype1] _____________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...
edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64

>       h = appnp(g, feat, edge_weight=eweight)
tests\pytorch\test_nn.py:699:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_appnp_conv_e_weight[g3-idtype0] _____________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32

>       h = appnp(g, feat, edge_weight=eweight)
tests\pytorch\test_nn.py:699:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_appnp_conv_e_weight[g3-idtype1] _____________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64

>       h = appnp(g, feat, edge_weight=eweight)
tests\pytorch\test_nn.py:699:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_appnp_conv_e_weight[g4-idtype0] _____________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32

>       h = appnp(g, feat, edge_weight=eweight)
tests\pytorch\test_nn.py:699:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_appnp_conv_e_weight[g4-idtype1] _____________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64

>       h = appnp(g, feat, edge_weight=eweight)
tests\pytorch\test_nn.py:699:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_appnp_conv_e_weight[g5-idtype0] _____________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32

>       h = appnp(g, feat, edge_weight=eweight)
tests\pytorch\test_nn.py:699:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_appnp_conv_e_weight[g5-idtype1] _____________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64

>       h = appnp(g, feat, edge_weight=eweight)
tests\pytorch\test_nn.py:699:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
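Both failing test families funnel through the same path: EdgeWeightNorm('both') calls update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) at graphconv.py:117, and DGL lowers that message/reduce pair to the 'copy_rhs'/'sum' gSpMM seen in each traceback. A self-contained sketch of that lowering trigger, on a hypothetical three-node graph (the graph and weights here are made up for illustration; any homogeneous graph takes the same code path):

    import torch
    import dgl
    import dgl.function as fn

    # Toy homogeneous graph: a directed 3-cycle.
    g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
    g.edata['_edge_w'] = torch.ones(g.num_edges())

    # copy_edge + sum is the pair from graphconv.py:117; it lowers to
    # gspmm(g, 'copy_rhs', 'sum', None, w), the call that hits the broken
    # autocast wrapper on this build.
    g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight'))
    print(g.ndata['out_weight'])  # per-node sums of incoming edge weights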
___________________ test_gcn2conv_e_weight[True-g0-idtype0] ___________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...}
edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32, bias = True

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize("bias", [True, False])
    def test_gcn2conv_e_weight(g, idtype, bias):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True)
        feat = F.randn((g.number_of_nodes(), 5))
        eweight = F.ones((g.num_edges(), ))
        gcn2conv = gcn2conv.to(ctx)
        res = feat
>       h = gcn2conv(g, res, feat, edge_weight=eweight)

tests\pytorch\test_nn.py:714:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward
    edge_weight = EdgeWeightNorm('both')(graph, edge_weight)
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:117: in forward
    reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:191: in func
    return gspmm(g, 'copy_rhs', reduce_op, None, x)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
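The keyword, not autocast itself, is what breaks. One plausible repair, sketched here as an assumption rather than the actual patch, is to make the fallback context accept and discard whatever arguments the call sites pass:

    from contextlib import contextmanager

    try:
        from torch.cuda.amp import autocast  # native AMP context manager
    except ImportError:
        @contextmanager
        def autocast(*args, **kwargs):
            # Older PyTorch: accept (and ignore) any arguments, including
            # enabled=False, so call sites need no version checks.
            yield

With a tolerant fallback like this, `with autocast(enabled=False):` degrades to a plain no-op block on builds that lack the native context manager, and the gspmm call sites stay unchanged.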
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________________ test_gcn2conv_e_weight[True-g0-idtype1] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, bias = True @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________________ test_gcn2conv_e_weight[True-g1-idtype0] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, bias = True @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________________ test_gcn2conv_e_weight[True-g1-idtype1] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, bias = True @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________________ test_gcn2conv_e_weight[True-g2-idtype0] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch... 
edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, bias = True @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________________ test_gcn2conv_e_weight[True-g2-idtype1] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch... 
edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, bias = True @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________________ test_gcn2conv_e_weight[True-g3-idtype0] ___________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, bias = True @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________________ test_gcn2conv_e_weight[True-g3-idtype1] ___________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, bias = True @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________________ test_gcn2conv_e_weight[True-g4-idtype0] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, bias = True @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________________ test_gcn2conv_e_weight[True-g4-idtype1] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, bias = True @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________________ test_gcn2conv_e_weight[True-g5-idtype0] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, bias = True @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ___________________ test_gcn2conv_e_weight[True-g5-idtype1] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, bias = True @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_gcn2conv_e_weight[False-g0-idtype0] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, bias = False @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_gcn2conv_e_weight[False-g0-idtype1] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, bias = False @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_gcn2conv_e_weight[False-g1-idtype0] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, bias = False @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_gcn2conv_e_weight[False-g1-idtype1] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, bias = False @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize("bias", [True, False]) def test_gcn2conv_e_weight(g, idtype, bias): ctx = F.ctx() g = g.astype(idtype).to(ctx) gcn2conv = nn.GCN2Conv(5, layer=2, alpha=0.5, bias=bias, project_initial_features=True) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) gcn2conv = gcn2conv.to(ctx) res = feat > h = gcn2conv(g, res, feat, edge_weight=eweight) tests\pytorch\test_nn.py:714: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gcn2conv.py:236: in forward edge_weight = EdgeWeightNorm('both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_gcn2conv_e_weight[False-g2-idtype0] ___________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch... 
__________________ test_gcn2conv_e_weight[False-g2-idtype0] ___________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...
          edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32, bias = False
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________ test_gcn2conv_e_weight[False-g2-idtype1] ___________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...
          edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64, bias = False
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________ test_gcn2conv_e_weight[False-g3-idtype0] ___________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32, bias = False
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________ test_gcn2conv_e_weight[False-g3-idtype1] ___________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64, bias = False
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________ test_gcn2conv_e_weight[False-g4-idtype0] ___________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32, bias = False
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________ test_gcn2conv_e_weight[False-g4-idtype1] ___________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64, bias = False
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________ test_gcn2conv_e_weight[False-g5-idtype0] ___________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32, bias = False
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________ test_gcn2conv_e_weight[False-g5-idtype1] ___________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64, bias = False
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
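Before the crash, the gspmm body quoted in each traceback canonicalizes its operator: 'sub' becomes 'add' with a negated right operand, and 'div' becomes 'mul' with a reciprocal right operand, so the backend only has to ship 'add' and 'mul' kernels. A plain-Python check of the two identities this rewrite relies on:

    # The algebraic identities behind gspmm's operator rewriting.
    lhs, rhs = 5.0, 2.0
    assert lhs - rhs == lhs + (-rhs)       # 'sub' -> 'add' with -rhs
    assert lhs / rhs == lhs * (1.0 / rhs)  # 'div' -> 'mul' with 1/rhs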
______________________ test_sgconv_e_weight[g0-idtype0] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...}
          edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    def test_sgconv_e_weight(g, idtype):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        sgconv = nn.SGConv(5, 5, 3)
        feat = F.randn((g.number_of_nodes(), 5))
        eweight = F.ones((g.num_edges(), ))
        sgconv = sgconv.to(ctx)
>       h = sgconv(g, feat, edge_weight=eweight)

tests\pytorch\test_nn.py:727:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sgconv.py:186: in forward
    'both')(graph, edge_weight)
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:117: in forward
    reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:191: in func
    return gspmm(g, 'copy_rhs', reduce_op, None, x)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_sgconv_e_weight[g0-idtype1] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...}
          edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_sgconv_e_weight[g1-idtype0] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_sgconv_e_weight[g1-idtype1] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_sgconv_e_weight[g2-idtype0] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...
          edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_sgconv_e_weight[g2-idtype1] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...
          edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_sgconv_e_weight[g3-idtype0] _______________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_sgconv_e_weight[g3-idtype1] _______________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_sgconv_e_weight[g4-idtype0] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_sgconv_e_weight[g4-idtype1] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_sgconv_e_weight[g5-idtype0] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_sgconv_e_weight[g5-idtype1] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)}
          edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
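All three failing test groups (GCN2Conv, SGConv, TAGConv) reach gspmm through the same path: each module's forward normalizes edge_weight with EdgeWeightNorm('both') before message passing, per the gcn2conv.py:236, sgconv.py:186, and tagconv.py:126 frames above. A standalone sketch of that normalization step, assuming a healthy DGL/PyTorch install (unlike the environment in this log) and a made-up toy graph:

    # Edge-weight normalization in isolation; the toy graph is hypothetical.
    import dgl
    import torch
    from dgl.nn import EdgeWeightNorm

    g = dgl.graph(([0, 1, 2], [1, 2, 0]))  # 3-node directed cycle
    eweight = torch.ones(g.num_edges())
    norm = EdgeWeightNorm(norm='both')      # symmetric normalization, as in the tests
    norm_eweight = norm(g, eweight)         # the call that descends into gspmm above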
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_tagconv_e_weight[g0-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_tagconv_e_weight(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) conv = nn.TAGConv(5, 5, bias=True) conv = conv.to(ctx) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) conv = conv.to(ctx) > h = conv(g, feat, edge_weight=eweight) tests\pytorch\test_nn.py:740: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\tagconv.py:126: in forward 'both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_tagconv_e_weight[g1-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_tagconv_e_weight(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) conv = nn.TAGConv(5, 5, bias=True) conv = conv.to(ctx) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) conv = conv.to(ctx) > h = conv(g, feat, edge_weight=eweight) tests\pytorch\test_nn.py:740: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\tagconv.py:126: in forward 'both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_tagconv_e_weight[g1-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_tagconv_e_weight(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) conv = nn.TAGConv(5, 5, bias=True) conv = conv.to(ctx) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) conv = conv.to(ctx) > h = conv(g, feat, edge_weight=eweight) tests\pytorch\test_nn.py:740: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\tagconv.py:126: in forward 'both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_tagconv_e_weight[g2-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch... 
edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_tagconv_e_weight(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) conv = nn.TAGConv(5, 5, bias=True) conv = conv.to(ctx) feat = F.randn((g.number_of_nodes(), 5)) eweight = F.ones((g.num_edges(), )) conv = conv.to(ctx) > h = conv(g, feat, edge_weight=eweight) tests\pytorch\test_nn.py:740: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\tagconv.py:126: in forward 'both')(graph, edge_weight) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:117: in forward reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_tagconv_e_weight[g2-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch... 
______________________ test_tagconv_e_weight[g2-idtype1] ______________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), '_src_out_w': Sch...
edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), '_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64
gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_tagconv_e_weight[g3-idtype0] ______________________
g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32
gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_tagconv_e_weight[g3-idtype1] ______________________
g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64
gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_tagconv_e_weight[g4-idtype0] ______________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32
gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_tagconv_e_weight[g4-idtype1] ______________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64
gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_tagconv_e_weight[g5-idtype0] ______________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32
gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_tagconv_e_weight[g5-idtype1] ______________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'_src_out_w': Scheme(shape=(), dtype=torch.float32), '_dst_in_w': Scheme(shape=(), dtype=torch.float32)} edata_schemes={'_edge_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64
gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
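For reference, the failing TAGConv path reproduces outside the suite with a short script; the three-edge graph below is illustrative, and the edge_weight argument is what routes the call through GraphConv's norm='both' branch into gspmm:

    import torch as th
    import dgl
    from dgl.nn.pytorch import TAGConv

    g = dgl.graph((th.tensor([0, 1, 2]), th.tensor([1, 2, 0])))
    conv = TAGConv(5, 5, bias=True)
    feat = th.randn(g.num_nodes(), 5)
    eweight = th.ones(g.num_edges())
    # Raises TypeError at sparse.py:720 on builds where autocast is the
    # no-kwargs empty_context fallback; succeeds once the fallback accepts kwargs.
    h = conv(g, feat, edge_weight=eweight)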
_______________________ test_gin_conv[mean-g0-idtype0] ________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int32, aggregator_type = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
    def test_gin_conv(g, idtype, aggregator_type):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gin = nn.GINConv(
            th.nn.Linear(5, 12),
            aggregator_type
        )
        th.save(gin, tmp_buffer)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gin = gin.to(ctx)
>       h = gin(g, feat)

tests\pytorch\test_nn.py:756:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\ginconv.py:147: in forward
    graph.update_all(aggregate_fn, _reducer('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.1578, 0.9274, -0.3338, -1.0208, -1.7261], [-0.9936, -1.2473, 0.3943, 0.6899, -0.1839], ...0.1618], [-0.0325, 0.3879, 0.8579, 0.0226, 0.6298], [-0.9232, -0.7955, -0.4868, -0.0830, 0.7374]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
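The test_gin_conv failures are the same bug reached through 'copy_lhs' instead of 'copy_rhs': GINConv aggregates node features (lhs_data is the feature tensor, rhs_data is None) but lands on the identical autocast line. A corresponding minimal call, again with an illustrative graph:

    import torch as th
    import dgl
    from dgl.nn.pytorch import GINConv

    g = dgl.graph((th.tensor([0, 1, 2]), th.tensor([1, 2, 0])))
    gin = GINConv(th.nn.Linear(5, 12), 'mean')  # 'max' and 'sum' fail identically
    feat = th.randn(g.num_nodes(), 5)
    h = gin(g, feat)  # TypeError at sparse.py:720 on the affected builds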
_______________________ test_gin_conv[mean-g0-idtype1] ________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int64, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.0631, 1.7769, -1.5049, 0.4322, -0.6622], [ 0.4781, 1.2847, 1.2636, 0.9980, 1.5270], ...1.0043], [ 1.1717, 0.3404, -0.2702, 1.1591, 0.4501], [-1.4900, -0.3216, 0.1101, 0.3037, -0.7344]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g1-idtype0] ________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.1399, 1.2955, 0.1397, 0.2387, -0.4162], [ 1.7430, -0.1689, 0.9269, 0.0209, 0.4076], ...0.6856], [-1.1189, 0.7819, 1.8479, 2.0297, -0.6487], [ 0.9764, -0.0551, -0.6369, -2.4758, 0.6960]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g1-idtype1] ________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.1854, 0.6338, -2.5521, 0.6106, -0.2610], [-0.7510, -0.6518, 2.0678, 1.6954, -0.2636], ...0.0880], [ 0.4399, 0.0286, -0.3754, 0.9592, -1.1065], [ 0.8646, -0.7142, 0.4081, 1.4733, -0.2494]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g2-idtype0] ________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.0171, -0.8855, 1.4332, -1.4715, 0.6713], [-0.2114, 0.7143, 0.7356, -0.5021, 0.3700], ...0.2789], [ 0.6205, 0.8589, 0.8029, -0.9879, 1.8120], [ 1.5201, 0.4331, 0.5148, -1.2150, -1.2043]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g2-idtype1] ________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.4136, -0.0842, -0.0607, 2.7614, 0.9834], [-0.4795, 1.7599, -1.2918, 0.4193, 1.5680], ...0.2102], [ 0.4479, 2.2428, 0.7148, -0.2024, 1.0989], [ 2.2271, 0.1086, 0.6063, -0.8233, -1.8636]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g3-idtype0] ________________________
g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.0597, -0.3504, -0.1914, 0.0507, 0.5362], [ 2.2427, -1.5097, -1.5408, 0.3203, 0.7216], ...2.1791], [ 1.0410, -0.5246, -1.3653, -0.7680, -0.9917], [-0.6230, 1.2993, -0.3299, 0.9430, 0.8012]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g3-idtype1] ________________________
g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.5972, 0.0490, 0.0438, -1.3045, -0.4567], [-2.1196, -0.0708, -2.3856, 0.4803, 0.1487], ...1.8917], [ 2.1346, 0.1067, 0.2688, 0.3583, 0.2093], [-1.2903, 0.5724, -1.0920, -0.2209, 0.9032]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g4-idtype0] ________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.4177, -0.5618, 0.4309, -0.9013, -1.3876], [ 0.7270, 0.1227, 1.0965, -0.4204, 0.6108], ...1.7923], [ 1.0580, -0.3592, 0.5705, 1.2009, 1.9194], [-0.4993, 0.3096, -0.6783, -0.6336, 0.6429]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g4-idtype1] ________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.3501, -1.4167, -1.0396, -0.3281, 0.5279], [ 0.4625, -0.6377, -1.2637, 1.2002, -0.5915], ...1.6001], [-0.1340, 0.3802, 0.7768, -0.2667, -0.5524], [ 0.6077, 0.0641, -0.5888, -1.6979, -1.3596]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g5-idtype0] ________________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int32, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.1331, -1.1189, -0.6516, 1.0083, -0.3977], [ 1.2300, 0.0524, 0.1886, -0.6744, 0.0113], ...0.2066], [ 0.7235, -1.1525, 0.0567, -0.6923, 0.4674], [ 0.4098, 1.3261, -1.3476, 2.8055, -1.6596]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g5-idtype1] ________________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int64, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-2.4072, -0.9553, -2.5469, -1.1501, 0.2915], [-1.3309, -2.0913, 0.3251, 0.1511, -0.5105], ...2.0513], [ 0.0969, -0.6941, 0.5711, 1.2488, 0.6080], [ 0.0606, 0.9393, 0.7249, 0.3028, 0.0175]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g6-idtype0] ________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int32, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.2209, 0.7108, 0.5997, -0.0613, 0.3337], [-0.9931, 0.8302, -0.5921, 0.2965, -0.2925], ...0.2222], [ 1.4555, -0.3253, -0.5436, 0.0366, 0.4359], [-1.6682, -1.2368, -0.5446, 0.9565, 0.7209]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_______________________ test_gin_conv[mean-g6-idtype1] ________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={})
idtype = torch.int64, aggregator_type = 'mean'
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.2938e+00, -1.2575e+00, 5.1674e-01, 2.1682e+00, -4.2185e-01], [ 2.2190e-03, -3.9464e-01, 7.4524e...01, -6.6164e-01, -4.9276e-01, -1.2803e+00], [-2.6160e-01, -9.0249e-01, -5.9452e-01, -2.2122e+00, 4.5144e-01]])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________________ test_gin_conv[max-g0-idtype0] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, aggregator_type = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum']) def test_gin_conv(g, idtype, aggregator_type): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gin = nn.GINConv( th.nn.Linear(5, 12), aggregator_type ) th.save(gin, tmp_buffer) feat = F.randn((g.number_of_src_nodes(), 5)) gin = gin.to(ctx) > h = gin(g, feat) tests\pytorch\test_nn.py:756: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\ginconv.py:147: in forward graph.update_all(aggregate_fn, _reducer('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[ 0.4521, -0.0279, -0.5417, 0.4252, 0.4747], [-1.1361, 1.6613, 0.2875, 1.0265, -0.6965], ...0.7936], [-1.3712, 0.8832, 0.0483, -0.4799, -1.0016], [ 0.4824, 1.4265, -0.9215, 0.5297, 0.1564]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________________ test_gin_conv[max-g0-idtype1] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, aggregator_type = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum']) def test_gin_conv(g, idtype, aggregator_type): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gin = nn.GINConv( th.nn.Linear(5, 12), aggregator_type ) th.save(gin, tmp_buffer) feat = F.randn((g.number_of_src_nodes(), 5)) gin = gin.to(ctx) > h = gin(g, feat) tests\pytorch\test_nn.py:756: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\ginconv.py:147: in forward graph.update_all(aggregate_fn, _reducer('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[-0.0891, 0.0099, 1.2986, -0.2674, 0.8560], [ 0.9391, -0.2227, 0.0610, -1.5881, 1.7273], ...0.5334], [-0.3030, 0.4995, -1.1955, 0.8334, 0.0156], [-0.2388, 1.5657, 0.4686, 0.7966, -0.8837]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________________ test_gin_conv[max-g1-idtype0] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, aggregator_type = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum']) def test_gin_conv(g, idtype, aggregator_type): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gin = nn.GINConv( th.nn.Linear(5, 12), aggregator_type ) th.save(gin, tmp_buffer) feat = F.randn((g.number_of_src_nodes(), 5)) gin = gin.to(ctx) > h = gin(g, feat) tests\pytorch\test_nn.py:756: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\ginconv.py:147: in forward graph.update_all(aggregate_fn, _reducer('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[ 1.0191, -0.8505, 0.1193, -0.9345, -1.5733], [-0.8477, 2.5242, 0.8921, 0.4702, 1.6213], ...0.5669], [ 0.1155, -0.9951, -1.2259, 0.2155, 0.2644], [-0.3859, -0.2998, -0.2218, 0.3657, 0.3656]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________________ test_gin_conv[max-g1-idtype1] ________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, aggregator_type = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum']) def test_gin_conv(g, idtype, aggregator_type): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() gin = nn.GINConv( th.nn.Linear(5, 12), aggregator_type ) th.save(gin, tmp_buffer) feat = F.randn((g.number_of_src_nodes(), 5)) gin = gin.to(ctx) > h = gin(g, feat) tests\pytorch\test_nn.py:756: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\ginconv.py:147: in forward graph.update_all(aggregate_fn, _reducer('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[ 0.1430, 0.4813, -3.2906, 0.2665, 1.8577], [-0.0933, 1.5538, -0.7399, 1.0764, 0.2280], ...0.1311], [ 0.8158, -1.0273, 0.5022, 2.4275, 0.3870], [ 0.6560, 0.6469, 1.0354, -0.3586, 1.8573]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[max-g2-idtype0] ________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32, aggregator_type = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
    def test_gin_conv(g, idtype, aggregator_type):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gin = nn.GINConv(
            th.nn.Linear(5, 12),
            aggregator_type
        )
        th.save(gin, tmp_buffer)
        feat = F.randn((g.number_of_src_nodes(), 5))
        gin = gin.to(ctx)
>       h = gin(g, feat)

tests\pytorch\test_nn.py:756:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\ginconv.py:147: in forward
    graph.update_all(aggregate_fn, _reducer('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'max'
lhs_data = tensor([[ 0.0590, -1.1471, -0.1841, 1.3831, 0.1204],
        [-0.6261, 0.0087, 0.0347, -0.3270, 0.9536],
        ...0.6791],
        [-0.6236, 0.5164, 2.5420, -1.5327, 0.9765],
        [ 1.1259, -0.8221, 0.3412, -0.4780, -1.5303]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[max-g2-idtype1] ________________________
[traceback identical to test_gin_conv[max-g2-idtype0] above; idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[max-g3-idtype0] ________________________
[identical traceback; g = Graph(num_nodes=9, num_edges=15), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[max-g3-idtype1] ________________________
[identical traceback; g = Graph(num_nodes=9, num_edges=15), idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
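The gspmm body shown in these tracebacks never dispatches 'sub' or 'div' directly: it rewrites them into 'add' and 'mul' on transformed operands before reaching the autocast guard, so the backend only needs add/mul kernels. A standalone sketch of that rewrite; the helper name normalize_op is invented for illustration:

    import torch

    def normalize_op(op, rhs_data):
        # Mirrors the rewrite visible in the gspmm source above:
        # a - b == a + (-b), and a / b == a * (1 / b).
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        return op, rhs_data

    op, rhs = normalize_op('div', torch.tensor([2.0, 4.0]))
    assert op == 'mul'
    assert torch.allclose(rhs, torch.tensor([0.5000, 0.2500]))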
________________________ test_gin_conv[max-g4-idtype0] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[max-g4-idtype1] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[max-g5-idtype0] ________________________
[identical traceback; g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[max-g5-idtype1] ________________________
[identical traceback; g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[max-g6-idtype0] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[max-g6-idtype1] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
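Every failure in this block is the same crash: python\dgl\backend\pytorch\sparse.py:720 executes `with autocast(enabled=False)` while `autocast` is bound to a no-argument fallback context manager. A minimal reproduction, assuming the fallback looks roughly like the `empty_context` named in the error (its exact definition is not shown in this log):

    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args):
        # No-op stand-in of the kind used when torch.cuda.amp.autocast
        # is unavailable; note it declares no keyword parameters.
        yield

    autocast = empty_context

    try:
        with autocast(enabled=False):
            pass
    except TypeError as e:
        # empty_context() got an unexpected keyword argument 'enabled'
        print(e)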
________________________ test_gin_conv[sum-g0-idtype0] ________________________
[traceback identical to test_gin_conv[max-g2-idtype0] above, but with reduce_op = 'sum'; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g0-idtype1] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g1-idtype0] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g1-idtype1] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g2-idtype0] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g2-idtype1] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g3-idtype0] ________________________
[identical traceback; g = Graph(num_nodes=9, num_edges=15), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
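Any single case in this parametrized matrix (aggregator x graph case x index dtype) can be re-run in isolation by passing its full pytest node id; the quotes keep the shell from interpreting the brackets:

    pytest "tests/pytorch/test_nn.py::test_gin_conv[sum-g0-idtype0]"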
________________________ test_gin_conv[sum-g3-idtype1] ________________________
[identical traceback; g = Graph(num_nodes=9, num_edges=15), idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g4-idtype0] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g4-idtype1] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g5-idtype0] ________________________
[identical traceback; g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g5-idtype1] ________________________
[identical traceback; g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g6-idtype0] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gin_conv[sum-g6-idtype1] ________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
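One possible shape of a fix is to guard on the PyTorch version and route the `enabled` keyword only to the real amp autocast, which has accepted it since PyTorch 1.6; older builds get a plain no-op context instead. This is a sketch under those assumptions, not necessarily the patch under test:

    from contextlib import contextmanager
    from distutils.version import LooseVersion
    import torch as th

    if LooseVersion(th.__version__) >= LooseVersion('1.6.0'):
        from torch.cuda.amp import autocast

        def disable_autocast():
            # The real amp context manager accepts the `enabled` keyword.
            return autocast(enabled=False)
    else:
        @contextmanager
        def disable_autocast():
            # No amp support on this build, so there is nothing to disable.
            yield

    # Call sites then use `with disable_autocast():` and never pass
    # keyword arguments to the no-op fallback.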
_________________________ test_gine_conv[g0-idtype0] __________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hn': Scheme(shap...32)}
      edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), 'he': Scheme(shape=(5,), dtype=torch.float32)})
idtype = torch.int32

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
    def test_gine_conv(g, idtype):
        ctx = F.ctx()
        g = g.astype(idtype).to(ctx)
        gine = nn.GINEConv(
            th.nn.Linear(5, 12)
        )
        th.save(gine, tmp_buffer)
        nfeat = F.randn((g.number_of_src_nodes(), 5))
        efeat = F.randn((g.num_edges(), 5))
        gine = gine.to(ctx)
>       h = gine(g, nfeat, efeat)

tests\pytorch\test_nn.py:780:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gineconv.py:94: in forward
    graph.update_all(self.message, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:369: in message_passing
    ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:191: in func
    return gspmm(g, 'copy_rhs', reduce_op, None, x)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.8256, 0.0299],
        [0.0000, 0.0000, 0.4868, 0.0000, 0.0000],
        [0.0000, 0..., 1.2652, 1.1702],
        [0.0000, 0.0000, 1.1649, 0.0000, 0.6106],
        [2.7970, 0.0000, 0.0000, 0.8303, 0.0000]])

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_gine_conv[g0-idtype1] __________________________
[traceback identical to test_gine_conv[g0-idtype0] above; idtype = torch.int64]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_gine_conv[g1-idtype0] __________________________
[identical traceback; g = Graph(num_nodes=10, num_edges=17), idtype = torch.int32]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
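The test_gine_conv failures here differ from the test_gin_conv ones only in how gspmm is entered: edge messages arrive via 'copy_rhs' with lhs_data=None, where node features arrive via 'copy_lhs' with rhs_data=None, and both meet the same guard at sparse.py:720. A small DGL-style illustration of the two message kinds, assuming a build where that guard works:

    import torch
    import dgl
    import dgl.function as fn

    g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
    g.ndata['h'] = torch.randn(3, 5)
    g.edata['w'] = torch.randn(3, 5)

    # GINConv-style path: copy source-node features ('copy_lhs' in gspmm).
    g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'neigh_n'))

    # GINEConv-style path: copy edge features ('copy_rhs' in gspmm).
    g.update_all(fn.copy_e('w', 'm'), fn.sum('m', 'neigh_e'))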
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_gine_conv[g1-idtype1] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hn': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(5,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'])) def test_gine_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) gine = nn.GINEConv( th.nn.Linear(5, 12) ) th.save(gine, tmp_buffer) nfeat = F.randn((g.number_of_src_nodes(), 5)) efeat = F.randn((g.num_edges(), 5)) gine = gine.to(ctx) > h = gine(g, nfeat, efeat) tests\pytorch\test_nn.py:780: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gineconv.py:94: in forward graph.update_all(self.message, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:369: in message_passing ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.3305, 0.0000], [1.3401, 3.5389, 0.0000, 1.2314, 0.0568], [0.5184, 0..., 2.3965, 0.7931], [0.0000, 1.9356, 0.0000, 0.0000, 0.0000], [0.0000, 1.4872, 0.0000, 0.7977, 0.0692]]) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_gine_conv[g2-idtype0] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hn': Scheme(shap... 
_________________________ test_gine_conv[g2-idtype0] __________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hn': Scheme(shap...
    edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'he': Scheme(shape=(5,), dtype=torch.float32)})
idtype = torch.int32
(traceback identical to test_gine_conv[g1-idtype1] above; gspmm with op = 'copy_rhs', reduce_op = 'sum')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________________________ test_gine_conv[g2-idtype1] __________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hn': Scheme(shap...
    edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'he': Scheme(shape=(5,), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________________________ test_gine_conv[g3-idtype0] __________________________
g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'hn': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={'he': Scheme(shape=(5,), dtype=torch.float32)})
idtype = torch.int32
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________________________ test_gine_conv[g3-idtype1] __________________________
g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'hn': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={'he': Scheme(shape=(5,), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_gine_conv[g4-idtype0] __________________________
g = Graph(num_nodes=5, num_edges=6, ndata_schemes={'hn': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={'he': Scheme(shape=(5,), dtype=torch.float32)})
idtype = torch.int32
(traceback identical to test_gine_conv[g1-idtype1] above; gspmm with op = 'copy_rhs', reduce_op = 'sum')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________________________ test_gine_conv[g4-idtype1] __________________________
g = Graph(num_nodes=5, num_edges=6, ndata_schemes={'hn': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={'he': Scheme(shape=(5,), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________________________ test_gine_conv[g5-idtype0] __________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hn': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={'he': Scheme(shape=(5,), dtype=torch.float32)})
idtype = torch.int32
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________________________ test_gine_conv[g5-idtype1] __________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hn': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={'he': Scheme(shape=(5,), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________________________ test_gine_conv[g6-idtype0] __________________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int32
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________________________ test_gine_conv[g6-idtype1] __________________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int64
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________________________ test_gine_conv[g7-idtype0] __________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hn': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={'he': Scheme(shape=(5,), dtype=torch.float32)})
idtype = torch.int32
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

_________________________ test_gine_conv[g7-idtype1] __________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hn': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={'he': Scheme(shape=(5,), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
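The gspmm body shown in these tracebacks never fails on its own: it only canonicalizes the operator before dispatch, rewriting 'sub' as 'add' with a negated right-hand side and 'div' as 'mul' with a reciprocal, so the backend needs only add/mul/copy kernels. The identities it relies on, checked with plain tensors (a standalone illustration, not DGL code):

    import torch as th

    lhs = th.tensor([8.0, 8.0])
    rhs = th.tensor([2.0, 4.0])
    assert th.equal(lhs - rhs, lhs + (-rhs))       # 'sub' -> 'add' with -rhs
    assert th.equal(lhs / rhs, lhs * (1.0 / rhs))  # 'div' -> 'mul' with 1/rhs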
______________________ test_gin_conv_bi[mean-g0-idtype0] ______________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8},
      metagraph=[('_U', '_V', '_E')])
idtype = torch.int32, aggregator_type = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
    def test_gin_conv_bi(g, idtype, aggregator_type):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gin = nn.GINConv(
            th.nn.Linear(5, 12),
            aggregator_type
        )
        feat = (F.randn((g.number_of_src_nodes(), 5)),
                F.randn((g.number_of_dst_nodes(), 5)))
        gin = gin.to(ctx)
>       h = gin(g, feat)

tests\pytorch\test_nn.py:803:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\ginconv.py:147: in forward
    graph.update_all(aggregate_fn, _reducer('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.3758,  0.9582, -0.1891, -1.0195, -1.0334],
        [-1.0342,  0.4768,  1.0738,  0.3700,  0.2138]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
______________________ test_gin_conv_bi[mean-g0-idtype1] ______________________
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int64, aggregator_type = 'mean'
(traceback identical to test_gin_conv_bi[mean-g0-idtype0] above; gspmm with op = 'copy_lhs', reduce_op = 'sum')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_gin_conv_bi[mean-g1-idtype0] ______________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int32, aggregator_type = 'mean'
(traceback identical to the failure above; op = 'copy_lhs', reduce_op = 'sum')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_gin_conv_bi[mean-g1-idtype1] ______________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int64, aggregator_type = 'mean'
(traceback identical to the failure above; op = 'copy_lhs', reduce_op = 'sum')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_gin_conv_bi[max-g0-idtype0] _______________________
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int32, aggregator_type = 'max'
(traceback identical to the failure above; op = 'copy_lhs', reduce_op = 'max')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_gin_conv_bi[max-g0-idtype1] _______________________
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int64, aggregator_type = 'max'
(traceback identical to the failure above; op = 'copy_lhs', reduce_op = 'max')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_gin_conv_bi[max-g1-idtype0] _______________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int32, aggregator_type = 'max'
(traceback identical to the failure above; op = 'copy_lhs', reduce_op = 'max')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_gin_conv_bi[max-g1-idtype1] _______________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int64, aggregator_type = 'max'
(traceback identical to the failure above; op = 'copy_lhs', reduce_op = 'max')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_gin_conv_bi[sum-g0-idtype0] _______________________
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int32, aggregator_type = 'sum'
(traceback identical to the failure above; op = 'copy_lhs', reduce_op = 'sum')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_gin_conv_bi[sum-g0-idtype1] _______________________
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int64, aggregator_type = 'sum'
(traceback identical to the failure above; op = 'copy_lhs', reduce_op = 'sum')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_gin_conv_bi[sum-g1-idtype0] _______________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int32, aggregator_type = 'sum'
(traceback identical to the failure above; op = 'copy_lhs', reduce_op = 'sum')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError

______________________ test_gin_conv_bi[sum-g1-idtype1] _______________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int64, aggregator_type = 'sum'
(traceback identical to the failure above; op = 'copy_lhs', reduce_op = 'sum')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
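These failures are characteristic of an older torch build where the amp autocast API is missing and a shim is substituted in its place. One plausible shape for a repair (a sketch under that assumption, not the actual DGL diff): import the real autocast where it exists and fall back to a keyword-tolerant null context otherwise.

    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):
        # Null context manager that accepts and ignores autocast-style
        # arguments such as enabled=False.
        yield

    try:
        from torch.cuda.amp import autocast  # present on PyTorch >= 1.6
    except ImportError:
        autocast = empty_context

With this shape, `with autocast(enabled=False):` is valid on both branches, which is exactly the property the failing runs above lack.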
_________________________ test_agnn_conv[g0-idtype0] __________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)}
      edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int32

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    def test_agnn_conv(g, idtype):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        agnn = nn.AGNNConv(1)
        feat = F.randn((g.number_of_src_nodes(), 5))
        agnn = agnn.to(ctx)
>       h = agnn(g, feat)

tests\pytorch\test_nn.py:814:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward
    graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'dot'
lhs_data = tensor([[ 0.4286, -0.5061,  0.4009, -0.6207, -0.1192],
        [ 0.5157,  0.2645, -0.3096, -0.4872,  0.5752],
        ...0.8080],
        [-0.7636,  0.0049, -0.3576, -0.4648, -0.2701],
        [ 0.1654,  0.5234,  0.0197, -0.7705,  0.3234]])
rhs_data = tensor([[ 0.4286, -0.5061,  0.4009, -0.6207, -0.1192],
        [ 0.5157,  0.2645, -0.3096, -0.4872,  0.5752],
        ...0.8080],
        [-0.7636,  0.0049, -0.3576, -0.4648, -0.2701],
        [ 0.1654,  0.5234,  0.0197, -0.7705,  0.3234]])
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
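The gsddmm wrapper above (sparse.py:731) fails the same way as the gspmm one (sparse.py:720): both pair `_cast_if_autocast_enabled(...)` with `with autocast(enabled=False):`. That pairing is the usual custom-op autocast idiom: cast every floating-point tensor argument to the ambient autocast dtype once, then disable autocast so the kernel body is not re-cast midway. A sketch of the idiom in plain torch (the helper below is illustrative, and gsddmm_internal is a hypothetical stand-in for the real kernel):

    import torch as th

    def _cast_if_autocast_enabled(*args):
        # When autocast is active, cast floating-point tensors to the
        # autocast dtype; pass everything else through unchanged.
        if not th.is_autocast_enabled():
            return args
        dtype = th.get_autocast_gpu_dtype()
        return tuple(
            a.to(dtype) if th.is_tensor(a) and a.is_floating_point() else a
            for a in args
        )

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data,
                                         lhs_target, rhs_target)
        with th.cuda.amp.autocast(enabled=False):
            return gsddmm_internal(*args)  # hypothetical inner kernel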
_________________________ test_agnn_conv[g0-idtype1] __________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to test_agnn_conv[g0-idtype0] above; gsddmm with op = 'dot', lhs_target = 'u', rhs_target = 'v')
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError

_________________________ test_agnn_conv[g1-idtype0] __________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={})
idtype = torch.int32
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError

_________________________ test_agnn_conv[g1-idtype1] __________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={})
idtype = torch.int64
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError

_________________________ test_agnn_conv[g2-idtype0] __________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int32
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError

_________________________ test_agnn_conv[g2-idtype1] __________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to the failure above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError

_________________________ test_agnn_conv[g3-idtype0] __________________________
g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)}
    edata_schemes={})
idtype = torch.int32
(same test body and call chain as test_agnn_conv[g0-idtype0] above)

gidx =
op = 'dot'
lhs_data = tensor([[ 0.1998,  0.6931, -0.4464,  0.5090, -0.1461],
        [ 0.3102, -0.0704, -0.1842,  0.2706,  0.8898],
        ...0.1013],
        [ 0.3624,  0.3080,  0.0491,  0.2483,  0.8425],
        [-0.7690,  0.3942, -0.2130,  0.4483,  0.0835]])
rhs_data = tensor([[ 0.1998,  0.6931, -0.4464,  0.5090, -0.1461],
        [ 0.3102, -0.0704, -0.1842,  0.2706,  0.8898],
        ...0.1013],
        [ 0.3624,  0.3080,  0.0491,  0.2483,  0.8425],
        [-0.7690,  0.3942, -0.2130,  0.4483,  0.0835]])
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_agnn_conv[g3-idtype1] __________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) def test_agnn_conv(g, idtype): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() agnn = nn.AGNNConv(1) feat = F.randn((g.number_of_src_nodes(), 5)) agnn = agnn.to(ctx) > h = agnn(g, feat) tests\pytorch\test_nn.py:814: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[-0.1265, -0.7315, 0.5625, -0.3529, -0.0896], [-0.5474, 0.1471, 0.7415, 0.0966, 0.3458], ...0.0119], [-0.2535, -0.1942, 0.7298, 0.1781, 0.5776], [ 0.7240, 0.2574, 0.0281, -0.0540, -0.6371]]) rhs_data = tensor([[-0.1265, -0.7315, 0.5625, -0.3529, -0.0896], [-0.5474, 0.1471, 0.7415, 0.0966, 0.3458], ...0.0119], [-0.2535, -0.1942, 0.7298, 0.1781, 0.5776], [ 0.7240, 0.2574, 0.0281, -0.0540, -0.6371]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_agnn_conv[g4-idtype0] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) def test_agnn_conv(g, idtype): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() agnn = nn.AGNNConv(1) feat = F.randn((g.number_of_src_nodes(), 5)) agnn = agnn.to(ctx) > h = agnn(g, feat) tests\pytorch\test_nn.py:814: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[-0.2267, 0.8288, -0.5061, -0.0075, -0.0735], [ 0.2321, -0.5285, 0.3473, -0.2443, 0.6975], ...0.6429], [-0.2003, 0.3505, 0.8049, 0.1947, 0.3890], [-0.4860, 0.1049, 0.7086, 0.2431, 0.4377]]) rhs_data = tensor([[-0.2267, 0.8288, -0.5061, -0.0075, -0.0735], [ 0.2321, -0.5285, 0.3473, -0.2443, 0.6975], ...0.6429], [-0.2003, 0.3505, 0.8049, 0.1947, 0.3890], [-0.4860, 0.1049, 0.7086, 0.2431, 0.4377]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_agnn_conv[g4-idtype1] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) def test_agnn_conv(g, idtype): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() agnn = nn.AGNNConv(1) feat = F.randn((g.number_of_src_nodes(), 5)) agnn = agnn.to(ctx) > h = agnn(g, feat) tests\pytorch\test_nn.py:814: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[-0.6677, 0.2939, 0.0287, -0.0568, -0.6810], [ 0.2680, -0.8927, 0.2729, -0.2278, 0.0696], ...0.1314], [ 0.3086, 0.0765, -0.1365, 0.6202, -0.7040], [ 0.2117, 0.0861, -0.6551, -0.7159, -0.0782]]) rhs_data = tensor([[-0.6677, 0.2939, 0.0287, -0.0568, -0.6810], [ 0.2680, -0.8927, 0.2729, -0.2278, 0.0696], ...0.1314], [ 0.3086, 0.0765, -0.1365, 0.6202, -0.7040], [ 0.2117, 0.0861, -0.6551, -0.7159, -0.0782]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_agnn_conv[g5-idtype0] __________________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) def test_agnn_conv(g, idtype): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() agnn = nn.AGNNConv(1) feat = F.randn((g.number_of_src_nodes(), 5)) agnn = agnn.to(ctx) > h = agnn(g, feat) tests\pytorch\test_nn.py:814: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[-0.3977, 0.2251, -0.8785, -0.0774, 0.1164], [-0.4034, 0.6895, -0.2428, 0.1087, -0.5395], ...0.0890], [-0.1768, -0.8609, 0.2600, -0.3795, 0.1266], [ 0.3800, 0.6862, -0.2795, 0.0117, 0.5535]]) rhs_data = tensor([[-0.3977, 0.2251, -0.8785, -0.0774, 0.1164], [-0.4034, 0.6895, -0.2428, 0.1087, -0.5395], [ 0.8165, -0.1868, 0.4614, 0.2877, 0.0517]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_agnn_conv[g5-idtype1] __________________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) def test_agnn_conv(g, idtype): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() agnn = nn.AGNNConv(1) feat = F.randn((g.number_of_src_nodes(), 5)) agnn = agnn.to(ctx) > h = agnn(g, feat) tests\pytorch\test_nn.py:814: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[ 0.6973, 0.0855, -0.1773, -0.5932, -0.3510], [-0.4243, 0.5767, -0.0593, -0.6103, 0.3336], ...0.3158], [-0.7679, -0.3975, -0.0985, 0.3186, 0.3757], [ 0.7443, -0.5032, 0.3005, 0.2271, 0.2257]]) rhs_data = tensor([[ 0.6973, 0.0855, -0.1773, -0.5932, -0.3510], [-0.4243, 0.5767, -0.0593, -0.6103, 0.3336], [-0.1072, 0.0329, -0.5464, -0.5862, -0.5876]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_agnn_conv[g6-idtype0] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) def test_agnn_conv(g, idtype): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() agnn = nn.AGNNConv(1) feat = F.randn((g.number_of_src_nodes(), 5)) agnn = agnn.to(ctx) > h = agnn(g, feat) tests\pytorch\test_nn.py:814: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[-0.9683, -0.1114, -0.0521, 0.0175, -0.2166], [ 0.3572, -0.7885, 0.2879, -0.3184, 0.2576], ...0.3020], [-0.3313, -0.0581, -0.4103, -0.2573, -0.8077], [ 0.7230, 0.4639, 0.0691, 0.3755, 0.3411]]) rhs_data = tensor([[-0.9683, -0.1114, -0.0521, 0.0175, -0.2166], [ 0.3572, -0.7885, 0.2879, -0.3184, 0.2576], ...0.3020], [-0.3313, -0.0581, -0.4103, -0.2573, -0.8077], [ 0.7230, 0.4639, 0.0691, 0.3755, 0.3411]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_agnn_conv[g6-idtype1] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5,), dtype=torch.float32), 'norm_h': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) def test_agnn_conv(g, idtype): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() agnn = nn.AGNNConv(1) feat = F.randn((g.number_of_src_nodes(), 5)) agnn = agnn.to(ctx) > h = agnn(g, feat) tests\pytorch\test_nn.py:814: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[-0.4028, 0.1882, -0.0703, 0.6326, -0.6303], [ 0.7948, 0.1143, 0.3665, -0.1892, 0.4302], ...0.3724], [-0.0222, -0.3507, 0.2915, -0.8669, -0.2002], [-0.2782, -0.5298, -0.3185, -0.6043, -0.4187]]) rhs_data = tensor([[-0.4028, 0.1882, -0.0703, 0.6326, -0.6303], [ 0.7948, 0.1143, 0.3665, -0.1892, 0.4302], ...0.3724], [-0.0222, -0.3507, 0.2915, -0.8669, -0.2002], [-0.2782, -0.5298, -0.3185, -0.6043, -0.4187]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
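/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError

On newer PyTorch the call site legitimately passes enabled=False to the real torch.cuda.amp.autocast, so the cleanest repair for older versions is on the fallback side. A hedged sketch of one possible fix, not necessarily the change this PR actually makes: let the stand-in accept and ignore whatever arguments the real autocast would take.

    # Sketch of a keyword-tolerant no-op fallback (an assumption, not the PR diff).
    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):   # swallows enabled=False and friends
        yield

    autocast = empty_context

    with autocast(enabled=False):         # now a harmless no-op on old PyTorch
        pass

An equivalent alternative would be to version-gate the call site so that autocast(enabled=False) is only issued where the real implementation exists.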
________________________ test_agnn_conv_bi[g0-idtype0] ________________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) def test_agnn_conv_bi(g, idtype): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() agnn = nn.AGNNConv(1) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) agnn = agnn.to(ctx) > h = agnn(g, feat) tests\pytorch\test_nn.py:825: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[ 0.0225, 0.9611, 0.0094, 0.2695, 0.0559], [-0.0736, -0.6987, 0.5495, -0.2351, -0.3863]]) rhs_data = tensor([[ 0.0939, 0.5455, -0.2364, -0.4320, -0.6717], [ 0.2838, 0.1115, -0.1927, 0.1573, -0.9193], [ 0.4359, -0.4086, 0.6233, 0.1291, -0.4877], [-0.3743, 0.7116, 0.3030, -0.4810, 0.1743]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_agnn_conv_bi[g0-idtype1] ________________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) def test_agnn_conv_bi(g, idtype): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() agnn = nn.AGNNConv(1) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) agnn = agnn.to(ctx) > h = agnn(g, feat) tests\pytorch\test_nn.py:825: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[ 0.7318, -0.2515, 0.5226, -0.0065, -0.3578], [-0.3914, -0.0922, -0.2712, 0.7205, 0.4957]]) rhs_data = tensor([[-0.0176, 0.5481, 0.2368, 0.6908, 0.4074], [-0.1797, -0.6486, 0.1299, 0.6318, 0.3619], [ 0.4815, 0.2730, -0.0349, -0.1926, 0.8095], [-0.3582, 0.6670, 0.4827, -0.0349, 0.4387]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_agnn_conv_bi[g1-idtype0] ________________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) def test_agnn_conv_bi(g, idtype): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() agnn = nn.AGNNConv(1) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) agnn = agnn.to(ctx) > h = agnn(g, feat) tests\pytorch\test_nn.py:825: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[-0.2893, -0.2946, 0.3116, -0.6415, 0.5665], [ 0.1616, 0.1738, 0.1269, 0.0087, -0.9631], ...0.4032], [-0.4777, -0.5367, -0.4411, 0.3914, -0.3688], [-0.1720, -0.7269, -0.3300, 0.3665, 0.4460]]) rhs_data = tensor([[-0.3404, -0.5240, -0.1260, -0.7705, 0.0011], [ 0.0942, -0.3938, 0.2178, -0.8228, 0.3340], [ 0.0639, -0.1988, 0.0763, 0.6668, -0.7113]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ________________________ test_agnn_conv_bi[g1-idtype1] ________________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) def test_agnn_conv_bi(g, idtype): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() agnn = nn.AGNNConv(1) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) agnn = agnn.to(ctx) > h = agnn(g, feat) tests\pytorch\test_nn.py:825: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\agnnconv.py:153: in forward graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[ 0.0763, 0.6179, 0.7062, 0.2729, 0.1978], [-0.0653, -0.9371, -0.2681, 0.2031, 0.0670], ...0.2984], [-0.9346, -0.1018, 0.0289, 0.2653, 0.2121], [ 0.3888, -0.3734, -0.4271, 0.4352, 0.5810]]) rhs_data = tensor([[-3.1542e-01, -1.5139e-01, 2.1705e-01, 1.4768e-02, -9.1119e-01], [-2.5411e-01, 8.1217e-01, -1.1445e-01, -2.8103e-01, 4.2864e-01], [ 2.6396e-04, 2.4084e-01, -6.0702e-01, -5.4679e-01, 5.2397e-01]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
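/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError

The gsddmm source that pytest prints with each failure also documents a small algebraic normalization: 'sub' and 'div' are rewritten to 'add' and 'mul' by negating or inverting rhs_data, so the backend only needs kernels for the latter pair. A standalone check of those identities in plain PyTorch, nothing DGL-specific; the operands are powers of two so the reciprocal form is bit-exact:

    import torch

    lhs = torch.tensor([6.0, 8.0])
    rhs = torch.tensor([2.0, 4.0])

    assert torch.equal(lhs - rhs, lhs + (-rhs))       # 'sub' -> 'add'
    assert torch.equal(lhs / rhs, lhs * (1.0 / rhs))  # 'div' -> 'mul'

The rewrite runs before _cast_if_autocast_enabled, so the crash hits after normalization no matter which operator the layer requested.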
______________________ test_gated_graph_conv[g0-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), 'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 1.9701, -1.5809, -0.9174, 1.1354, -2.0402, -2.6653, 0.3838, -0.1625, 0.7666, 0.8643], [...0.1865, -2.9586, -1.7290, -0.2510, 3.2699, 0.5544, -0.9090, 1.6071, 0.8992]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_gated_graph_conv[g0-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), 'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 1.0021, -1.2659, 0.7103, -0.0434, 1.8240, 0.2811, -0.3031, -0.7373, -0.1379, -1.0032], [...0.3836, -0.4799, 1.2181, -1.2509, 0.0240, -1.1274, -0.3704, -0.3235, -0.2490]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_gated_graph_conv[g1-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-2.5566, -0.6239, -1.9764, 1.5942, 0.0807, -3.3504, 0.9141, -1.7515, 0.8498, -0.7732], [...1.0434, 0.0997, -0.0060, -0.0236, 0.1211, 0.2912, -1.6525, -1.1316, -0.4106]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_gated_graph_conv[g1-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 3.3253e-02, -2.2311e-01, -9.8281e-01, 6.7130e-01, -2.9924e-02, -1.1228e+00, -1.9009e-03, 7.5394e-...42e-01, -2.5462e-01, -3.9647e-01, 4.9793e-01, 4.5027e-02, -9.9199e-01]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_gated_graph_conv[g2-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 5.5361e-01, -1.5942e-01, -1.3038e+00, -4.9414e-02, -4.2178e-01, -1.4795e+00, -5.2905e-01, -1.7544e-...04e+00, 1.1562e+00, -1.9064e+00, 1.1469e+00, 1.1027e+00, 2.0168e-02]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_gated_graph_conv[g2-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 0.5290, -3.4911, 0.6114, -0.0053, -1.8625, -1.4583, 2.6483, 0.3852, -1.3438, -1.0683], [...0.0120, -1.2531, -0.1098, -0.3702, -0.9222, -1.0065, -0.5864, -1.6965, -0.5853]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_gated_graph_conv[g3-idtype0] ______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.3450, 0.4842, 0.1922, 0.0640, -0.3673, 0.7398, -1.1455, -1.0007, 0.4845, 2.0563], [...0.3085, -0.0098, -1.4459, -0.1898, -0.5901, -0.6204, 0.6072, 1.3917, 1.1367]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_gated_graph_conv[g3-idtype1] ______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-1.2025e+00, -1.6061e+00, -9.0034e-01, -1.0437e+00, 6.4090e-01, -9.1958e-01, 6.8953e-02, -1.1184e+...76e-01, 5.7717e-01, 1.5296e+00, 7.3805e-01, -1.4248e-01, -2.4844e+00]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_gated_graph_conv[g4-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 1.0152e+00, -2.4924e-01, 1.5077e+00, 5.9481e-01, 1.4205e+00, -1.7157e+00, 1.6703e+00, 9.4764e-...93e-01, -3.7687e-01, -3.5135e-01, -1.1963e+00, 3.3918e-01, -2.8068e-01]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_gated_graph_conv[g4-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.7958, 0.0785, -0.4371, -1.3536, -1.0355, 0.8085, 0.2263, 1.0418, -0.3464, 0.9260], [...0.1908, -0.1929, -1.6428, -1.6145, 0.4732, 1.4833, -0.3811, 0.2378, 0.5424]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_gated_graph_conv[g5-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-6.5399e-01, 2.3313e-01, 1.1944e+00, -1.1540e+00, -4.4744e-01, -9.4751e-01, 3.2691e-01, 8.1827e-...70e+00, 6.6351e-01, 5.5303e-01, 1.0211e+00, -1.5889e+00, 6.1944e-02]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ______________________ test_gated_graph_conv[g5-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 3) etypes = th.arange(g.number_of_edges()) % 3 feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-1.5005, 0.7843, -0.2795, -0.1400, -0.8133, 0.1751, -0.2120, -0.6873, -1.0987, 0.4442], [...0.3670, -0.3157, 0.5572, 0.6258, -1.5046, 1.2371, -0.6116, -1.0062, -0.3811]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
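/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError

For the GatedGraphConv failures the crash site is gspmm (sparse.py:720) rather than gsddmm: update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) lowers to gspmm(g, 'copy_rhs', 'sum', None, x), which sums each destination node's incoming edge features. A dense sketch of that reduction; dst below is a hypothetical edge-to-destination index used for illustration, not DGL API:

    import torch

    num_nodes, feat_dim = 4, 3
    dst = torch.tensor([1, 1, 3])           # destination node of each edge
    edge_feat = torch.randn(3, feat_dim)    # one message row per edge ('copy_rhs')

    out = torch.zeros(num_nodes, feat_dim)
    out.index_add_(0, dst, edge_feat)       # 'sum' reduce: scatter-add into dst rows

Nodes with no incoming edges keep the zero initialization, which matches sum-reduction semantics for isolated nodes.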
_________________ test_gated_graph_conv_one_etype[g0-idtype0] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), 'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.6897, -1.1861, 1.6512, -0.7090, -0.0858, -0.9571, 0.5387, 1.1565, -1.6776, 0.1337], [...0.8486, 1.9717, -0.7752, 1.3342, 0.0961, -0.5752, -0.5053, 0.6519, -0.4360]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_gated_graph_conv_one_etype[g0-idtype1] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), 'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.0134, 0.3718, 0.3940, 0.4956, 0.8686, 0.9926, -0.4224, 0.5083, 0.2102, 2.1514], [...0.4049, 0.2793, 0.0671, -1.3619, -0.9489, 0.4725, 0.1181, -0.0904, -3.2025]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_gated_graph_conv_one_etype[g1-idtype0] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.3178, -0.8938, -1.6870, 0.9792, -1.5222, 0.0930, -0.6055, -0.6056, -0.3570, 0.8798], [...0.8081, -3.2159, 1.3163, -0.5642, -1.9705, 0.1376, -1.6199, -2.5948, 1.2127]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_gated_graph_conv_one_etype[g1-idtype1] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 0.8741, -0.6940, 1.2184, 0.9291, -1.1192, 0.5512, 0.5371, 0.4918, 1.0928, -0.6343], [...0.7946, -0.1105, 0.4282, -0.0574, 0.4263, -0.8628, 0.6048, 1.9482, -0.7608]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_gated_graph_conv_one_etype[g2-idtype0] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.0136, 0.7415, -0.5897, -0.2060, -1.4802, -0.3587, -0.7062, 0.1650, 0.8711, 0.2058], [...1.1643, -2.7958, -1.1792, -3.6901, -1.6968, -0.1925, 0.0420, 0.9647, 1.8737]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_gated_graph_conv_one_etype[g2-idtype1] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.5424, -1.6713, -0.2660, 0.5454, 0.5041, 0.7466, 1.0852, 2.6828, -1.1898, -0.3039], [...0.8101, -1.2362, -0.3035, -0.5102, 0.0238, 0.9539, 0.6480, 0.3275, -1.3877]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_gated_graph_conv_one_etype[g3-idtype0] _________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-1.2203e+00, 4.9314e-01, 7.1594e-02, -1.1298e+00, -1.5235e+00, 1.0951e+00, 1.2548e+00, -7.3806e-...61e+00, 8.3022e-01, 7.0108e-01, 1.1366e+00, 1.0659e+00, 1.8892e+00]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_gated_graph_conv_one_etype[g3-idtype1] _________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.0729, -0.1870, 0.1397, 0.5697, -0.0602, 0.2616, -0.7796, 1.1400, -0.4169, -0.5673], [...0.2804, -0.2232, 0.6668, -0.6743, 0.2663, -0.1057, 0.4649, 0.3657, -0.4250]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_gated_graph_conv_one_etype[g4-idtype0] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-2.2347e-02, -4.0493e-01, -1.7914e-01, 4.0358e-01, 1.2097e-01, 1.1547e+00, 3.5966e-01, 1.2723e+...95e+00, 1.0301e+00, -8.5566e-01, -1.0406e-01, -1.5340e+00, -1.2449e+00]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_gated_graph_conv_one_etype[g4-idtype1] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-0.4417, -1.9879, -1.2170, 1.7168, -0.5137, -0.3298, -0.5861, -1.4225, -2.3076, -0.4068], [...2.6698, -3.0065, -0.3171, 0.1134, 2.5033, -2.0765, -1.7769, -1.3350, 0.9011]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_gated_graph_conv_one_etype[g5-idtype0] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int32 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[ 0.9016, -3.2595, -0.7682, 1.6626, -1.0176, -1.0124, 0.1687, 0.1683, 2.0540, 1.1160], [...1.6954, -0.5533, 0.1701, 0.3437, 0.9253, 1.1491, 0.1864, -0.1712, -0.0799]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_gated_graph_conv_one_etype[g5-idtype1] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)} edata_schemes={'W_e*h': Scheme(shape=(10,), dtype=torch.float32)}) idtype = torch.int64 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) def test_gated_graph_conv_one_etype(g, idtype): ctx = F.ctx() g = g.astype(idtype).to(ctx) ggconv = nn.GatedGraphConv(5, 10, 5, 1) etypes = th.zeros(g.number_of_edges()) feat = F.randn((g.number_of_nodes(), 5)) ggconv = ggconv.to(ctx) etypes = etypes.to(ctx) > h = ggconv(g, feat, etypes) tests\pytorch\test_nn.py:854: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\gatedgraphconv.py:166: in forward graph.update_all(fn.copy_e('W_e*h', 'm'), fn.sum('m', 'a')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:191: in func return gspmm(g, 'copy_rhs', reduce_op, None, x) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum', lhs_data = None rhs_data = tensor([[-1.1769e+00, -3.1107e-02, -6.1376e-01, -3.3871e-01, -6.5688e-01, 2.9310e-01, -4.7821e-01, 6.7372e-...63e+00, 1.3877e+00, 3.2194e-01, 6.0567e-01, -7.0362e-01, 3.1825e-01]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
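Every failure above is the same crash: the convolution lowers to DGL's generalized SpMM, and the gspmm wrapper in python\dgl\backend\pytorch\sparse.py calls autocast(enabled=False) at line 720. The c:\program files\python36 paths show this job runs Python 3.6, so an older PyTorch; on that code path the backend evidently binds autocast to a no-argument empty_context fallback, which then rejects the enabled keyword. A minimal sketch of the failure mode; empty_context and the binding here are hypothetical stand-ins, not DGL's exact code:

    from contextlib import contextmanager

    @contextmanager
    def empty_context():               # hypothetical fallback: accepts no arguments
        yield

    autocast = empty_context           # what the old-PyTorch code path amounts to

    with autocast(enabled=False):      # TypeError: empty_context() got an
        pass                           #   unexpected keyword argument 'enabled'

A fallback that accepts and ignores its arguments, e.g. def empty_context(*args, **kwargs), would tolerate this call site on old PyTorch while newer builds keep the real autocast context.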
__________________________ test_nn_conv[g0-idtype0] ___________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)}
      edata_schemes={'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int32

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    def test_nn_conv(g, idtype):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        edge_func = th.nn.Linear(4, 5 * 10)
        nnconv = nn.NNConv(5, 10, edge_func, 'mean')
        feat = F.randn((g.number_of_src_nodes(), 5))
        efeat = F.randn((g.number_of_edges(), 4))
        nnconv = nnconv.to(ctx)
>       h = nnconv(g, feat, efeat)

tests\pytorch\test_nn.py:870:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\nnconv.py:168: in forward
    graph.update_all(fn.u_mul_e('h', 'w', 'm'), self.reducer('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:323: in invoke_gspmm
    z = op(graph, x, y)
python\dgl\ops\spmm.py:147: in func
    return gspmm(g, binary_op, reduce_op, x, y)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <...>
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[ 1.6105], [ 1.1448], [ 1.4999], [-0.5556], [-0.4088]],
        [[ 0.094..., ... [ 0.1699]],
        [[-0.2861], [ 0.7597], [-2.4939], [-1.0669], [ 0.1899]]])
rhs_data = tensor([[[ 8.2576e-01, -4.7050e-01,  7.7563e-01,  8.2536e-01,
          2.4392e-01, -4.5868e-01, -6.0147e-01,  1.8605..., ...2142e-01,
         -4.8486e-01, -6.3138e-02, -4.2237e-01,  3.2299e-01, -2.8527e-01]]],
        grad_fn=<...>)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_nn_conv[g0-idtype1] ___________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g1-idtype0] ___________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int32
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g1-idtype1] ___________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g2-idtype0] ___________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int32
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g2-idtype1] ___________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g3-idtype0] ___________________________
g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int32
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g3-idtype1] ___________________________
g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g4-idtype0] ___________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int32
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g4-idtype1] ___________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g5-idtype0] ___________________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g5-idtype1] ___________________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g6-idtype0] ___________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int32
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
__________________________ test_nn_conv[g6-idtype1] ___________________________
g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(5, 1), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(5, 10), dtype=torch.float32)})
idtype = torch.int64
(traceback identical to g0-idtype0 above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
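Two details worth noting in the frames above before the next batch of failures. First, the tests reach gspmm through different message functions: test_nn_conv and test_nn_conv_bi enter via fn.u_mul_e with op='mul' (python\dgl\ops\spmm.py:147), while test_gated_graph_conv_one_etype enters via fn.copy_e with op='copy_rhs' and lhs_data=None (python\dgl\ops\spmm.py:191); both paths hit the same autocast call. Second, the gspmm source shown in every final frame rewrites 'sub' into 'add' on a negated operand and 'div' into 'mul' on a reciprocal, so the backend only needs add/mul (and copy) kernels. A plain-tensor check of the identities that rewrite relies on:

    import torch

    lhs = torch.randn(4, 3)
    rhs = torch.randn(4, 3)

    # 'sub' -> 'add' with negated rhs; 'div' -> 'mul' with reciprocal rhs
    assert torch.allclose(lhs - rhs, lhs + (-rhs))
    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))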
/ rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_nn_conv_bi[g0-idtype0] _________________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int32

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
    def test_nn_conv_bi(g, idtype):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        edge_func = th.nn.Linear(4, 5 * 10)
        nnconv = nn.NNConv((5, 2), 10, edge_func, 'mean')
        feat = F.randn((g.number_of_src_nodes(), 5))
        feat_dst = F.randn((g.number_of_dst_nodes(), 2))
        efeat = F.randn((g.number_of_edges(), 4))
        nnconv = nnconv.to(ctx)
>       h = nnconv(g, (feat, feat_dst), efeat)

tests\pytorch\test_nn.py:885:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\nnconv.py:168: in forward
    graph.update_all(fn.u_mul_e('h', 'w', 'm'), self.reducer('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:323: in invoke_gspmm
    z = op(graph, x, y)
python\dgl\ops\spmm.py:147: in func
    return gspmm(g, binary_op, reduce_op, x, y)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-1.8129], [ 0.0198], [-1.0557], [-1.8359], [ 0.0123]],
                   [[-0.6516], [-0.4092], [-0.1734], [-1.2135], [-0.7169]]])
rhs_data = tensor([[[-2.4746e-01, 5.3657e-01, -1.0729e-01, 1.2967e-01, 7.4316e-01,
                     -5.5548e-01, -1.3860e-01, -1.6261....9881e-02, 4.5117e-01,
                     2.6996e-01, 3.5154e-01, -7.7012e-01, -1.4374e-01]]], grad_fn=)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_nn_conv_bi[g0-idtype1] _________________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int64
[test body and call path identical to test_nn_conv_bi[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-2.4119], [-0.9670], [-1.7840], [-0.6586], [ 1.4624]], [[ 1.9595], [-0.1084], [-0.3641], [-0.9541], [ 0.8652]]])
rhs_data = tensor([[[-0.3702, 0.3196, -0.5047, 0.1652, -0.8513, 0.5248, -1.1570, -0.9323, 0.4260, 0.6290], ..., 0.7234, 0.5843, 0.8130, -1.1965, -0.3629, 1.5739, -1.0989, -1.4331, -0.4681]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_nn_conv_bi[g1-idtype0] _________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32
[test body and call path identical to test_nn_conv_bi[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[ 0.0668], [-1.0075], [-0.3981], [-1.2391], [-0.5691]], [[ 0.973... [ 0.3488]], [[-0.4802], [-0.2115], [ 0.7891], [-0.0660], [-0.6463]]])
rhs_data = tensor([[[ 2.8435e-01, 1.1743e+00, 1.5007e+00, 4.5370e-01, 7.7482e-01, -7.2070e-01, 4.7197e-01, -4.4477....4213e-01, -8.0057e-01, -2.5315e-01, 6.6837e-01, 1.0907e-01, -6.1113e-01]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_nn_conv_bi[g1-idtype1] _________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64
[test body and call path identical to test_nn_conv_bi[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-0.7108], [ 0.5723], [ 1.5076], [ 0.0324], [-0.8884]], [[ 0.489... [-0.0056]], [[ 1.2583], [-0.9806], [ 0.1414], [ 1.1724], [-1.6388]]])
rhs_data = tensor([[[-0.0077, 0.6619, 0.2604, -0.3285, 0.4877, -0.3253, -0.7351, -0.2655, -1.2512, -0.4471], ..., 0.0377, 0.6960, 0.1912, -0.9976, -0.7429, 0.0805, 0.7918, 0.9663, -0.3223]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
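Every failure in this run shares a single root cause rather than anything specific to the module under test: each message-passing path funnels into gspmm(), and python\dgl\backend\pytorch\sparse.py:720 enters "with autocast(enabled=False):". On PyTorch older than 1.12 (the boundary named by the commit under test, "fix for pytorch < 1.12"), DGL substitutes a no-op context manager for autocast, and that substitute rejects the enabled keyword. A minimal self-contained sketch of the failure pattern, assuming only what the traceback shows (this empty_context definition is illustrative, not DGL's actual source):

    from contextlib import contextmanager

    # No-op stand-in for torch autocast when real amp support is unavailable;
    # the name is taken from the error message above.
    @contextmanager
    def empty_context():
        yield

    autocast = empty_context  # the aliasing implied by the traceback

    try:
        with autocast(enabled=False):  # the call made at sparse.py:720
            pass
    except TypeError as e:
        print(e)  # -> empty_context() got an unexpected keyword argument 'enabled'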
__________________________ test_gmm_conv[g0-idtype0] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int32

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    def test_gmm_conv(g, idtype):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gmmconv = nn.GMMConv(5, 10, 3, 4, 'mean')
        feat = F.randn((g.number_of_nodes(), 5))
        pseudo = F.randn((g.number_of_edges(), 3))
        gmmconv = gmmconv.to(ctx)
>       h = gmmconv(g, feat, pseudo)

tests\pytorch\test_nn.py:898:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\gmmconv.py:240: in forward
    graph.update_all(fn.u_mul_e('h', 'w', 'm'), self._reducer('m', 'h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:323: in invoke_gspmm
    z = op(graph, x, y)
python\dgl\ops\spmm.py:147: in func
    return gspmm(g, binary_op, reduce_op, x, y)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-1.0394e-01, 1.5901e-01, -8.2195e-01, -5.5929e-01, -1.8217e+00, 9.4200e-01, -8.4482e-01, 8.1142....2907e-01, -2.8834e-01, 1.7658e-01, 1.8434e-02, -2.3371e-02, 3.7818e-01]]], grad_fn=)
rhs_data = tensor([[[0.3099], [0.2443], [0.2831], [0.1945]], [[0.4584], [0.5196], ... [0.4078]], [[0.7378], [0.6698], [0.7158], [0.6567]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_gmm_conv[g0-idtype1] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int64
[test body and call path identical to test_gmm_conv[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-1.0087, -0.5836, 0.8994, 0.4047, 0.5372, 1.1467, -0.4768, -0.1266, -1.0438, 0.5160], ..., 0.3299, -0.0093, 0.6166, -1.2858, -0.4726, 0.5087, -1.7000, -0.8218, -0.0943]]], grad_fn=)
rhs_data = tensor([[[4.9052e-01], [4.3691e-01], [5.2787e-01], [5.0224e-01]], [[5.8906e-01], ..., [[9.0779e-01], [9.2133e-01], [9.4710e-01], [9.4793e-01]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_gmm_conv[g1-idtype0] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int32
[test body and call path identical to test_gmm_conv[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-5.2860e-02, 9.9621e-01, 5.3027e-01, -3.3924e-01, 2.1454e+00, 6.8411e-01, -1.7225e+00, -7.6966....8524e-01, 1.0855e+00, 3.9795e-01, -1.4060e+00, -3.7768e-01, -1.0134e+00]]], grad_fn=)
rhs_data = tensor([[[1.1542e-01], [1.2960e-01], [1.3857e-01], [1.7276e-01]], [[1.9985e-01], ..., [[6.8592e-01], [6.7675e-01], [7.3623e-01], [6.6932e-01]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_gmm_conv[g1-idtype1] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int64
[test body and call path identical to test_gmm_conv[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[ 1.0668e+00, 7.9595e-01, 3.7557e-01, -1.8368e-01, 8.4075e-01, 8.7313e-02, 3.1826e-01, -4.3885....6818e-01, -1.0887e-01, -4.2135e-02, -5.4503e-04, 3.7206e-01, -7.2532e-02]]], grad_fn=)
rhs_data = tensor([[[0.4501], [0.3756], [0.3836], [0.4343]], [[0.5577], [0.5648], ... [0.7938]], [[0.0612], [0.0565], [0.0564], [0.0629]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_gmm_conv[g2-idtype0] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int32
[test body and call path identical to test_gmm_conv[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-1.1562e-01, 2.8064e-01, -3.8650e-01, 6.6696e-02, -2.6928e-02, -2.8173e-01, 7.1332e-02, 1.0294....8192e-01, -1.6281e-02, -4.2456e-01, -1.9981e-01, -6.9853e-01, 9.9441e-02]]], grad_fn=)
rhs_data = tensor([[[0.1800], [0.2041], [0.3098], [0.2772]], [[0.2622], [0.2848], ... [0.3757]], [[0.0098], [0.0123], [0.0246], [0.0220]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_gmm_conv[g2-idtype1] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int64
[test body and call path identical to test_gmm_conv[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[ 0.3245, 0.3329, -1.0036, -0.3040, 1.4024, -0.3393, 1.6174, 1.0631, -0.6760, -0.7969], ..., -0.5083, -0.6526, 0.0906, -0.0647, -0.5312, 0.3471, -0.1384, 0.3147, -0.2978]]], grad_fn=)
rhs_data = tensor([[[0.6044], [0.6429], [0.6066], [0.5427]], [[0.0683], [0.0644], ... [0.5994]], [[0.1766], [0.1585], [0.1949], [0.1432]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_gmm_conv[g3-idtype0] __________________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int32
[test body and call path identical to test_gmm_conv[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[ 6.9404e-01, -6.0983e-01, 7.1854e-01, -6.1359e-01, 1.6982e+00, -9.7232e-02, -8.8549e-01, 5.9038....5970e-01, 4.2481e-01, -7.8436e-02, -1.9853e-01, 1.0576e+00, 7.9524e-01]]], grad_fn=)
rhs_data = tensor([[[0.0410], [0.1067], [0.0470], [0.0842]], [[0.1473], [0.2526], ... [0.0728]], [[0.2209], [0.1347], [0.2877], [0.1334]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_gmm_conv[g3-idtype1] __________________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int64
[test body and call path identical to test_gmm_conv[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-1.5042e-01, -3.8891e-01, 1.7365e+00, 1.1596e+00, -7.9376e-01, 7.2831e-01, -5.9966e-01, -2.6602....5965e-01, 7.5041e-01, 1.7507e+00, 2.4121e-01, -6.4155e-01, 7.0471e-01]]], grad_fn=)
rhs_data = tensor([[[0.0362], [0.0387], [0.0530], [0.0311]], [[0.1989], [0.2230], ... [0.0465]], [[0.0350], [0.0223], [0.0215], [0.0462]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_gmm_conv[g4-idtype0] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int32
[test body and call path identical to test_gmm_conv[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-7.2804e-01, 9.3219e-01, 2.3345e-01, -7.8081e-01, -1.6124e+00, -1.3781e+00, 7.7109e-01, 7.1306....7545e-01, 3.5383e-01, 2.4587e-01, 1.6901e+00, 3.8637e-01, -1.4900e+00]]], grad_fn=)
rhs_data = tensor([[[0.8726], [0.7758], [0.8965], [0.8581]], [[0.2396], [0.2013], ... [0.7634]], [[0.5795], [0.4999], [0.5012], [0.6198]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_gmm_conv[g4-idtype1] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int64
[test body and call path identical to test_gmm_conv[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[ 1.0576e+00, -6.4621e-01, -4.1629e-01, -9.0518e-02, -1.2911e+00, 1.1726e+00, 2.3198e-01, -4.5388....1093e-02, 3.4171e-01, -4.2320e-01, -5.1958e-01, -5.2254e-02, 1.5779e+00]]], grad_fn=)
rhs_data = tensor([[[0.1811], [0.2556], [0.2531], [0.1786]], [[0.2156], [0.2901], ... [0.0832]], [[0.1494], [0.1385], [0.1432], [0.1771]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_gmm_conv[g5-idtype0] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int32
[test body and call path identical to test_gmm_conv[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-8.5381e-01, -1.9209e+00, 5.5114e-03, -6.2952e-01, -1.4025e+00, -7.9472e-01, -5.0047e-01, -2.9629....2686e-01, -5.5830e-01, -6.8988e-01, -1.3553e+00, 1.4776e+00, 1.1147e+00]]], grad_fn=)
rhs_data = tensor([[[5.3326e-01], [5.7278e-01], [5.3645e-01], [6.2758e-01]], [[8.5498e-01], ..., [[4.9567e-01], [5.5898e-01], [5.3344e-01], [5.7535e-01]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
__________________________ test_gmm_conv[g5-idtype1] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(4, 10), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(4, 1), dtype=torch.float32)})
idtype = torch.int64
[test body and call path identical to test_gmm_conv[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-3.9792e-01, -5.6246e-01, -1.0099e+00, -6.0917e-01, -2.2818e+00, 1.6990e+00, -3.4209e-01, 3.2864....9608e-01, -5.8952e-01, -3.5950e-01, 8.0589e-01, -2.1694e-01, 1.2902e-01]]], grad_fn=)
rhs_data = tensor([[[0.8668], [0.8903], [0.8428], [0.9275]], [[0.0793], [0.0645], ... [0.0894]], [[0.9182], [0.8660], [0.8941], [0.7923]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
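The twelve test_gmm_conv failures above confirm that the error is insensitive to graph shape, feature schemes, and index dtype; only the shared sparse backend matters. Given the commit message "fix for pytorch < 1.12", one plausible minimal remedy, stated here as an assumption rather than as the change this PR actually makes, is to let the no-op fallback accept and ignore the arguments the real autocast takes:

    from contextlib import contextmanager

    # Hypothetical fix sketch: the fallback swallows any arguments so that
    # call sites like autocast(enabled=False) remain valid on old PyTorch.
    @contextmanager
    def empty_context(*args, **kwargs):
        yield

    autocast = empty_context

    with autocast(enabled=False):  # now a harmless no-op
        pass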
________________________ test_gmm_conv_bi[g0-idtype0] _________________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int32

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite', 'block-bipartite'], exclude=['zero-degree']))
    def test_gmm_conv_bi(g, idtype):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        gmmconv = nn.GMMConv((5, 2), 10, 3, 4, 'mean')
        feat = F.randn((g.number_of_src_nodes(), 5))
        feat_dst = F.randn((g.number_of_dst_nodes(), 2))
        pseudo = F.randn((g.number_of_edges(), 3))
        gmmconv = gmmconv.to(ctx)
>       h = gmmconv(g, (feat, feat_dst), pseudo)

tests\pytorch\test_nn.py:912:
[call path identical to test_gmm_conv[g0-idtype0] above: gmmconv.py:240 forward
 -> heterograph.py:4895 update_all -> core.py:357 message_passing
 -> core.py:323 invoke_gspmm -> ops\spmm.py:147/77 gspmm]

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[ 0.0667, 0.0939, -0.5682, -0.4721, -0.5217, -0.1882, 0.2747, 0.1830, -0.2771, 0.1894], ..., -0.0132, 0.1187, -0.4078, 0.7262, 0.0545, -0.3632, -0.2224, 0.4490, 0.2743]]], grad_fn=)
rhs_data = tensor([[[0.0676], [0.0520], [0.0625], [0.0742]], [[0.7224], [0.7285], ... [0.0230]], [[0.0024], [0.0023], [0.0030], [0.0034]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gmm_conv_bi[g0-idtype1] _________________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int64
[test body and call path identical to test_gmm_conv_bi[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-0.2430, -0.1104, 0.0136, -0.7433, -0.3674, 0.2859, 0.4140, 0.2443, -0.4271, 0.2678], ..., -0.2325, 0.5585, 0.2391, 0.1514, -0.1036, 0.1283, 0.2434, 0.1330, -0.1417]]], grad_fn=)
rhs_data = tensor([[[0.8505], [0.7463], [0.8756], [0.8577]], [[0.2809], [0.2789], ... [0.3135]], [[0.0750], [0.0566], [0.0774], [0.0823]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gmm_conv_bi[g1-idtype0] _________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32
[test body and call path identical to test_gmm_conv_bi[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-3.8567e-01, 1.8279e-02, -4.1301e-01, 1.0180e+00, 6.4230e-01, 3.2729e-01, -2.3017e-01, 1.2685....0580e-01, -6.9238e-01, 2.1038e-01, -7.4467e-01, 2.1979e-01, -6.8019e-01]]], grad_fn=)
rhs_data = tensor([[[0.2555], [0.4781], [0.4722], [0.3243]], [[0.5087], [0.6912], ... [0.4522]], [[0.5181], [0.6382], [0.5075], [0.4310]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
________________________ test_gmm_conv_bi[g1-idtype1] _________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64
[test body and call path identical to test_gmm_conv_bi[g0-idtype0] above]
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[ 1.4966e-01, -7.5348e-01, 4.9827e-01, -3.5936e-01, 7.9210e-01, -6.2673e-01, 2.8023e-01, -9.7601....0406e-01, 2.1381e-01, 6.1623e-01, 5.2469e-02, 2.0718e-02, -3.5698e-02]]], grad_fn=)
rhs_data = tensor([[[0.4400], [0.6046], [0.4899], [0.5654]], [[0.0035], [0.0070], ... [0.0050]], [[0.4921], [0.6128], [0.5089], [0.6100]]], grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
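The bipartite and block-bipartite variants fail at the same backend line again, which points at the version gate that selects between the real autocast and the fallback rather than at any particular conv module. A sketch of such a gate, with the names and version threshold as assumptions (not DGL's actual code):

    import torch
    from contextlib import contextmanager

    # Derive (major, minor) from a version string such as '1.10.0+cu113'.
    _TORCH_MAJOR_MINOR = tuple(int(v) for v in torch.__version__.split('.')[:2])

    if _TORCH_MAJOR_MINOR >= (1, 12):
        from torch import autocast  # device-generic autocast in newer PyTorch
    else:
        @contextmanager
        def autocast(*args, **kwargs):  # signature-compatible no-op fallback
            yield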
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g0-both-idtype0] ___________________ norm_type = 'both' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.6894], [-0.2656]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g0-both-idtype1] ___________________ norm_type = 'both' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.7086], [-0.7572]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g0-right-idtype0] __________________ norm_type = 'right' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.4984], [0.7740]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g0-right-idtype1] __________________ norm_type = 'right' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.2060], [2.0173]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g0-none-idtype0] ___________________ norm_type = 'none' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.5704], [-0.0931]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g0-none-idtype1] ___________________ norm_type = 'none' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[4.3463], [0.3246]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The same TypeError (empty_context() got an unexpected keyword argument
'enabled', raised from python\dgl\backend\pytorch\sparse.py:720 via the
identical call chain) repeats for the remaining 26 test_dense_graph_conv
parametrizations; only the graph fixture, the idtype, and the randomly
generated input tensors differ:

test_dense_graph_conv[1-g1-both-idtype0]   test_dense_graph_conv[1-g1-both-idtype1]
test_dense_graph_conv[1-g1-right-idtype0]  test_dense_graph_conv[1-g1-right-idtype1]
test_dense_graph_conv[1-g1-none-idtype0]   test_dense_graph_conv[1-g1-none-idtype1]
test_dense_graph_conv[1-g2-both-idtype0]   test_dense_graph_conv[1-g2-both-idtype1]
test_dense_graph_conv[1-g2-right-idtype0]  test_dense_graph_conv[1-g2-right-idtype1]
test_dense_graph_conv[1-g2-none-idtype0]   test_dense_graph_conv[1-g2-none-idtype1]
test_dense_graph_conv[1-g3-both-idtype0]   test_dense_graph_conv[1-g3-both-idtype1]
test_dense_graph_conv[1-g3-right-idtype0]  test_dense_graph_conv[1-g3-right-idtype1]
test_dense_graph_conv[1-g3-none-idtype0]   test_dense_graph_conv[1-g3-none-idtype1]
test_dense_graph_conv[1-g4-both-idtype0]   test_dense_graph_conv[1-g4-both-idtype1]
test_dense_graph_conv[1-g4-right-idtype0]  test_dense_graph_conv[1-g4-right-idtype1]
test_dense_graph_conv[1-g4-none-idtype0]   test_dense_graph_conv[1-g4-none-idtype1]
test_dense_graph_conv[1-g5-both-idtype0]   test_dense_graph_conv[1-g5-both-idtype1]
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g5-right-idtype0] __________________ norm_type = 'right' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.7863], [ 0.2211], [ 1.0069], [-0.6882], [ 0.3965], [ 0.7213], [-1.1229], [ 0.6901], [-0.4128], [-0.3472]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g5-right-idtype1] __________________ norm_type = 'right' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.8666], [-1.3170], [-1.7080], [-1.4374], [-1.3648], [-0.3498], [-0.3272], [ 1.2613], [ 1.0615], [-1.7627]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g5-none-idtype0] ___________________ norm_type = 'none' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1201], [ 1.8022], [-2.1805], [-1.3612], [ 0.1846], [ 0.3148], [-1.0696], [-1.1247], [-0.5497], [ 1.3347]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g5-none-idtype1] ___________________ norm_type = 'none' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.7504], [ 0.6316], [-0.7456], [-1.2564], [-1.4312], [-0.2252], [ 3.1701], [-0.3720], [ 0.3011], [-0.8195]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g6-both-idtype0] ___________________ norm_type = 'both', g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.5740], [-0.6218], [-1.0874], [ 3.8511], [ 0.3914], [-0.5728]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g6-both-idtype1] ___________________ norm_type = 'both', g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.0634], [-0.4338], [ 0.2534], [-0.1352], [-0.6974], [-1.7371]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g6-right-idtype0] __________________ norm_type = 'right', g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.3309], [-1.8538], [ 0.8958], [-1.0850], [-3.3681], [-3.0249]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g6-right-idtype1] __________________ norm_type = 'right', g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.1006], [ 1.4585], [-0.9599], [ 2.0109], [ 0.7593], [-0.7545]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g6-none-idtype0] ___________________ norm_type = 'none', g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.8062], [-0.5509], [ 0.4601], [ 0.7325], [ 0.0354], [-0.2172]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g6-none-idtype1] ___________________ norm_type = 'none', g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2768], [-0.1300], [ 0.3197], [-0.4221], [-0.3408], [ 0.4516]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g7-both-idtype0] ___________________ norm_type = 'both' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1307], [-1.1302], [ 0.1583], [ 1.2993], [-0.8542], [ 1.4163], [ 0.0415], [-0.8618], [ 0.6743], [ 1.1464]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g7-both-idtype1] ___________________ norm_type = 'both' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0031], [ 1.4958], [-0.9031], [-0.5207], [ 0.6594], [ 0.9867], [ 0.1700], [-2.2626], [ 0.8617], [ 1.6734]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g7-right-idtype0] __________________ norm_type = 'right' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.7116], [ 1.6214], [ 0.7151], [ 0.8486], [ 0.5530], [ 2.7667], [ 0.9301], [-1.6352], [-1.1589], [-0.5660]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g7-right-idtype1] __________________ norm_type = 'right' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7224], [ 0.1494], [-1.1859], [-1.0782], [-1.0921], [-0.7552], [-1.2500], [-0.6734], [-0.0864], [ 0.1035]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g7-none-idtype0] ___________________ norm_type = 'none' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.7045], [-2.4822], [ 1.0656], [-0.9557], [ 0.7185], [ 0.9086], [-0.3823], [ 0.0951], [-0.6454], [-2.5429]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[1-g7-none-idtype1] ___________________ norm_type = 'none' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.3929], [ 1.7412], [-2.3667], [ 0.3414], [-0.2181], [-1.9539], [-0.1957], [-0.7050], [-2.2120], [ 2.3472]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g0-both-idtype0] ___________________ norm_type = 'both' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.0535, 0.2615], [-0.1835, -0.3259]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g0-both-idtype1] ___________________ norm_type = 'both' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.6879, -0.6169], [-0.3183, 0.1158]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g0-right-idtype0] __________________ norm_type = 'right' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.4571, 0.3367], [ 1.2559, -1.3124]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g0-right-idtype1] __________________ norm_type = 'right' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8919, 0.5680], [ 0.9213, 0.5819]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g0-none-idtype0] ___________________ norm_type = 'none' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7677, -0.6142], [ 0.4679, 0.8218]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g0-none-idtype1] ___________________ norm_type = 'none' g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5966, -0.7014], [ 0.4207, -1.0862]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g1-both-idtype0] ___________________ norm_type = 'both' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 6.3572e-02, -2.3987e-01], [ 1.4900e+00, 5.2945e-01], [ 9.0326e-02, -6.0087e-01], [ ....4811e-01, 6.1425e-01], [-2.1378e+00, -5.2975e-02], [-3.0054e-01, -5.1798e-02]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g1-both-idtype1] ___________________ norm_type = 'both' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3212, -0.2879], [-0.0113, -0.1146], [ 0.2062, -0.1645], [ 0.0081, 0.0204], ...9, -1.1613], [-3.7358, -0.8968], [-0.9263, 1.6934], [ 0.9882, -1.1352]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g1-right-idtype0] __________________ norm_type = 'right' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2161, -3.3258], [ 0.3899, -0.4552], [ 1.3248, -1.3175], [-2.8084, 1.9503], ...3, -2.0930], [-0.5776, -0.7462], [-2.0418, 0.7217], [ 2.4984, 0.0508]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g1-right-idtype1] __________________ norm_type = 'right' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.0740, -0.7849], [ 1.7928, 0.2345], [ 0.5456, -0.7928], [ 0.8768, -1.2575], ...2, -1.0160], [-0.2619, -1.5572], [-1.1251, 0.3327], [ 1.1032, -0.0503]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g1-none-idtype0] ___________________ norm_type = 'none' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.4197, 0.8359], [ 1.3335, 0.4224], [ 1.5052, -0.8819], [-0.6998, 0.7681], ...9, -1.4529], [-2.2650, 0.3032], [ 0.6548, -0.0101], [ 0.7254, 0.6259]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g1-none-idtype1] ___________________ norm_type = 'none' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7308, 1.8016], [-0.6890, 0.5785], [ 1.1303, -1.7625], [ 2.9705, -1.3467], ...8, -3.2297], [ 0.7221, 1.0870], [-0.4338, -0.9184], [-0.7838, -0.7252]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g2-both-idtype0] ___________________ norm_type = 'both' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.2431, 1.6273], [ 0.1684, 0.2813], [-0.9821, -1.1271], [ 0.5550, 0.6853], ...1, -1.7264], [-1.3279, -2.6044], [ 0.0895, -1.2487], [ 1.7991, 0.3724]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g2-both-idtype1] ___________________ norm_type = 'both' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5246, -0.4132], [ 0.5062, -0.0038], [-0.5340, 0.3477], [-0.2291, 0.0294], ...1, 1.1515], [ 0.9314, -0.5209], [ 0.2423, -1.3572], [ 1.0417, -1.1523]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g2-right-idtype0] __________________ norm_type = 'right' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.9216, -1.6763], [ 1.7834, -2.0112], [-0.1023, 0.4966], [ 0.5120, 0.4627], ...5, -0.3781], [-1.0866, 0.9257], [ 1.3443, -0.2144], [-0.9199, -0.9447]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g2-right-idtype1] __________________ norm_type = 'right' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.4034, 0.1835], [-0.3035, -1.2629], [-0.2233, -1.7079], [ 1.8583, 1.5782], ...4, 0.9317], [ 2.1824, 1.6918], [ 2.3706, 1.0933], [ 1.6049, 1.4703]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g2-none-idtype0] ___________________ norm_type = 'none' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.8105, -0.7356], [-0.6959, 0.2979], [-1.3562, 0.1273], [-1.1062, 0.5512], ...1, -0.0236], [-0.7779, 1.6138], [-0.9351, 0.6571], [-0.9040, -0.3278]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g2-none-idtype1] ___________________ norm_type = 'none' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.5904, -0.2524], [ 1.4974, 0.6287], [-1.7777, 0.2143], [-0.5061, 0.6147], ...8, -1.0503], [ 1.1370, 1.0193], [-3.5506, 1.2077], [ 0.2752, 0.4062]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g3-both-idtype0] ___________________ norm_type = 'both' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3493, -0.9388], [-0.2284, 0.9643], [ 0.3206, 0.4100], [-0.0895, -1.4870], ...9, -0.1860], [ 1.2083, -0.5335], [ 2.5894, 1.3947], [ 0.4751, -0.6062]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g3-both-idtype1] ___________________ norm_type = 'both' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2779, 0.5695], [-0.2369, -0.9207], [ 0.1799, 0.7140], [-0.3010, -0.6443], ...4, 0.2508], [-0.7480, 0.4197], [-0.8351, 2.5504], [ 1.7923, 0.7883]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g3-right-idtype0] __________________ norm_type = 'right' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.0159, -0.3147], [ 0.3722, -0.1600], [-1.4089, -1.2514], [ 0.5023, -0.6940], ...4, 0.3467], [-0.6323, -0.3194], [ 0.0221, 0.3430], [ 1.2794, 2.6821]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g3-right-idtype1] __________________ norm_type = 'right' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.7702, -1.0171], [-0.3998, 0.0453], [-2.9881, -0.3333], [-0.5108, 0.9458], ...5, 0.7667], [-0.2178, 0.1502], [-0.5126, -0.6607], [ 0.6344, -0.1215]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g3-none-idtype0] ___________________ norm_type = 'none' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.6036, 1.1359], [ 0.4259, 1.2713], [-0.4820, 1.5359], [ 0.5081, -3.0142], ...6, -2.0203], [ 0.7775, 1.7450], [-0.4477, -1.1044], [ 0.0479, -1.2119]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g3-none-idtype1] ___________________ norm_type = 'none' g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.3831, -0.1623], [-0.2736, -1.5970], [ 0.0663, 0.4000], [-0.2874, 0.9317], ...0, 0.8166], [-1.4072, 0.5512], [ 0.4142, 0.0910], [-2.1473, 0.1844]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g4-both-idtype0] ___________________ norm_type = 'both' g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1397, -0.2550], [ 0.5466, 0.7480], [ 0.6702, 1.0812], [ 0.7443, 0.4168], ...9, -1.0333], [-2.1606, -2.4702], [ 0.7381, 1.0124], [ 0.2424, -0.2181]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g4-both-idtype1] ___________________ norm_type = 'both' g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3001, -0.2268], [ 0.2175, -0.9202], [ 0.5277, 1.1526], [ 1.2172, 3.2172], ...0, -0.7017], [-0.3155, -1.4023], [ 0.2472, 1.4226], [ 0.4283, -2.9465]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g4-right-idtype0] __________________ norm_type = 'right' g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.0733, 1.1845], [ 0.6701, -1.0438], [-1.0266, 1.9384], [ 0.3960, -0.9826], ...9, 0.2036], [ 0.4507, -1.2627], [ 0.5897, -1.5299], [-0.8876, 1.9282]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g4-right-idtype1] __________________ norm_type = 'right' g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.6527, -1.6535], [-1.2821, -2.0337], [ 0.9056, 0.6876], [-0.8874, 0.8351], ...8, -3.1637], [ 0.2045, 0.0760], [ 2.8796, 1.1828], [-3.8797, -0.9333]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_dense_graph_conv[2-g4-none-idtype0] ___________________ norm_type = 'none' g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('norm_type', ['both', 'right', 'none']) @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_graph_conv(norm_type, g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() # TODO(minjie): enable the following option after #1385 adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True) dense_conv.weight.data = conv.weight.data dense_conv.bias.data = conv.bias.data feat = F.randn((g.number_of_src_nodes(), 5)) conv = conv.to(ctx) dense_conv = dense_conv.to(ctx) > out_conv = conv(g, feat) tests\pytorch\test_nn.py:932: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:423: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.4946, 2.6052], [-2.3344, 0.6533], [ 0.8924, -0.6174], [-1.4227, -0.4393], ...4, 0.0735], [ 0.8834, 2.7240], [ 2.4323, -0.0342], [-0.1249, -0.3558]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
[The identical failure repeats for 19 further test_dense_graph_conv parametrizations.
Each case raises the same TypeError from python\dgl\backend\pytorch\sparse.py:720 via
the same call stack; only the parametrized graph, the idtype, and the random input
tensors differ:]

test_dense_graph_conv[2-g4-none-idtype1]    test_dense_graph_conv[2-g5-both-idtype0]
test_dense_graph_conv[2-g5-both-idtype1]    test_dense_graph_conv[2-g5-right-idtype0]
test_dense_graph_conv[2-g5-right-idtype1]   test_dense_graph_conv[2-g5-none-idtype0]
test_dense_graph_conv[2-g5-none-idtype1]    test_dense_graph_conv[2-g6-both-idtype0]
test_dense_graph_conv[2-g6-both-idtype1]    test_dense_graph_conv[2-g6-right-idtype0]
test_dense_graph_conv[2-g6-right-idtype1]   test_dense_graph_conv[2-g6-none-idtype0]
test_dense_graph_conv[2-g6-none-idtype1]    test_dense_graph_conv[2-g7-both-idtype0]
test_dense_graph_conv[2-g7-both-idtype1]    test_dense_graph_conv[2-g7-right-idtype0]
test_dense_graph_conv[2-g7-right-idtype1]   test_dense_graph_conv[2-g7-none-idtype0]
test_dense_graph_conv[2-g7-none-idtype1]
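Given the pattern above, one plausible guard (a sketch under assumptions, not dgl's actual sparse.py) is to make the fallback context manager accept and ignore the keyword arguments that the real torch.cuda.amp.autocast supports, so the shared call site works whether or not AMP is available. The `run_fp32` helper below is hypothetical and exists only to illustrate the call site.

# Hedged sketch of a keyword-tolerant fallback, assuming the alias
# structure suggested by the traceback; not dgl's implementation.
from contextlib import contextmanager

@contextmanager
def empty_context(*args, **kwargs):  # swallows `enabled=False` harmlessly
    yield

try:
    from torch.cuda.amp import autocast  # available on PyTorch >= 1.6
except ImportError:
    autocast = empty_context

def run_fp32(fn, *tensors):
    # Hypothetical helper: force full precision around a kernel call,
    # the way sparse.py:720 wraps the gspmm kernel.
    with autocast(enabled=False):
        return fn(*tensors)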
_____________________ test_dense_sage_conv[1-g0-idtype0] ______________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8},
      metagraph=[('_U', '_V', '_E')])
idtype = torch.int32, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_dense_sage_conv(g, idtype, out_dim):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense()
        sage = nn.SAGEConv(5, out_dim, 'gcn')
        dense_sage = nn.DenseSAGEConv(5, out_dim)
        dense_sage.fc.weight.data = sage.fc_neigh.weight.data
        dense_sage.fc.bias.data = sage.bias.data
        if len(g.ntypes) == 2:
            feat = (
                F.randn((g.number_of_src_nodes(), 5)),
                F.randn((g.number_of_dst_nodes(), 5))
            )
        else:
            feat = F.randn((g.number_of_nodes(), 5))
        sage = sage.to(ctx)
        dense_sage = dense_sage.to(ctx)
>       out_sage = sage(g, feat)

tests\pytorch\test_nn.py:956:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\sageconv.py:249: in forward
    graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <...>
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.4381], [ 3.1397]], grad_fn=<...>)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError

[Six further test_dense_sage_conv cases fail with the identical traceback and the
same TypeError at python\dgl\backend\pytorch\sparse.py:720:]

test_dense_sage_conv[1-g0-idtype1]   test_dense_sage_conv[1-g1-idtype0]
test_dense_sage_conv[1-g1-idtype1]   test_dense_sage_conv[1-g2-idtype0]
test_dense_sage_conv[1-g2-idtype1]   test_dense_sage_conv[1-g3-idtype0]

_____________________ test_dense_sage_conv[1-g3-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})
idtype = torch.int64, out_dim = 1

[test body and call stack identical to test_dense_sage_conv[1-g0-idtype0] above]

gidx = <...>
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.4889], [ 0.1853], [-3.6972], [-0.7642], [-3.8500], [-0.1650], [ 1.4379], [-1.4547], [ 1.9583], [ 2.4875]], grad_fn=<...>)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g4-idtype0] ______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-3.2967], [-2.0195], [-1.9533], [ 0.6717], [ 1.0001], [-2.0296], [-2.1718], [ 5.2240], [ 1.6120]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g4-idtype1] ______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7903], [-1.7348], [ 3.2055], [-0.0260], [ 1.7800], [ 1.0049], [ 5.5662], [-0.7228], [-0.4006]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g5-idtype0] ______________________ g = Graph(num_nodes=5, num_edges=6, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.6542], [ 0.3013], [ 1.9912], [ 1.6111], [ 1.0939]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g5-idtype1] ______________________ g = Graph(num_nodes=5, num_edges=6, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5626], [ 1.5025], [-0.7117], [ 0.4166], [ 1.7778]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g6-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 4.7120], [-3.4943], [-4.2556], [ 1.2847], [ 2.0320], [ 0.7146], [ 3.4068], [ 0.3882], [-1.5304], [ 0.5456]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g6-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.8245], [ 1.3567], [-1.1342], [-1.8312], [-0.0789], [-0.7032], [-1.6584], [-0.4862], [-2.5145], [ 1.5917]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g7-idtype0] ______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.3729], [ 0.5553], [ 1.4915], [ 2.5368], [-1.1188], [-1.9325]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g7-idtype1] ______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.3041], [-0.4078], [ 1.7749], [ 0.7627], [-0.0916], [ 0.5965]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g8-idtype0] ______________________ g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.7658], [-1.2219], [ 0.1969], [-1.3367]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g8-idtype1] ______________________ g = Graph(num_nodes={'_U': 4, '_V': 5}, num_edges={('_U', '_E', '_V'): 6}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.4340], [-1.4126], [-0.0205], [ 2.3952]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g9-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.3321], [-1.5974], [-2.3200], [-0.5495], [ 2.2735], [-1.2528], [-1.0960], [-1.9671], [-3.1997], [ 1.0793]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[1-g9-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(1,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.2176], [ 0.7364], [-0.8227], [-2.8005], [-1.6167], [ 0.3160], [ 0.0540], [-0.6274], [-0.3670], [-1.2996]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[2-g0-idtype0] ______________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.9490, -1.0154], [ 1.6560, -0.6989]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[2-g0-idtype1] ______________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.5184, -1.3156], [-0.6252, 2.6336]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[2-g1-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 2.7403, 2.1781], [ 0.9852, -0.1202], [-4.5816, -1.0097], [ 1.0206, -0.3369], ...2, 0.3126], [-3.5051, -5.3337], [ 4.4905, 1.1021], [ 1.8589, -1.3745]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[2-g1-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.4170, 0.4901], [ 1.9065, 1.2294], [-0.8915, -0.7057], [-1.4940, 2.0305], ...9, 2.4506], [-3.2785, -0.2264], [ 2.1410, 0.7881], [-1.4397, -1.1673]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[2-g2-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7075, 1.5021], [ 0.3436, -1.5150], [ 0.8802, -0.0318], [ 0.3353, -1.5854], ...3, 0.2115], [-1.4992, 1.5468], [-0.1891, 0.5314], [ 0.7700, -1.1053]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[2-g2-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.0237, -3.9334], [ 2.0856, 0.9338], [-0.2846, 0.1558], [-0.9458, 2.4010], ...2, 1.7329], [-0.1940, -1.0803], [-0.8622, -0.0419], [ 2.7706, 0.0070]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[2-g3-idtype0] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.0378, -2.9151], [ 0.6657, 1.2088], [ 1.1628, 1.5059], [-1.4201, -1.1553], ...6, -0.4502], [-0.8209, 0.7523], [-2.6323, -3.9257], [ 1.0499, -0.3561]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[2-g3-idtype1] ______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.2368, -1.0537], [ 1.7805, -1.4698], [-3.3394, 2.0873], [-1.8355, 2.3716], ...2, 0.3513], [-1.5665, 1.1912], [-2.5450, 0.5936], [ 0.2189, -0.2958]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[2-g4-idtype0] ______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.5831, -0.0939], [ 0.5439, -0.5663], [ 1.2872, 1.1971], [ 0.8992, -0.0141], ...2, 0.8591], [-0.2988, 1.7972], [ 0.1536, -0.2179], [ 1.5247, -0.1977]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[2-g4-idtype1] ______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-1.2655, 1.4906], [ 2.9605, -1.6507], [ 0.2846, -0.0444], [ 0.3875, 0.5456], ...0, -1.8850], [ 0.6776, -1.0694], [ 1.2769, -0.4919], [-0.7781, 1.5584]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_dense_sage_conv[2-g5-idtype0] ______________________ g = Graph(num_nodes=5, num_edges=6, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_sage_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() sage = nn.SAGEConv(5, out_dim, 'gcn') dense_sage = nn.DenseSAGEConv(5, out_dim) dense_sage.fc.weight.data = sage.fc_neigh.weight.data dense_sage.fc.bias.data = sage.bias.data if len(g.ntypes) == 2: feat = ( F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)) ) else: feat = F.randn((g.number_of_nodes(), 5)) sage = sage.to(ctx) dense_sage = dense_sage.to(ctx) > out_sage = sage(g, feat) tests\pytorch\test_nn.py:956: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\sageconv.py:249: in forward graph.update_all(msg_fn, fn.sum('m', 'neigh')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.3531, -0.0311], [ 1.4617, 0.0862], [-2.0810, 0.7310], [-0.0582, 0.1861], [ 0.3801, 1.2621]], grad_fn=) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
(The same failure repeats, with only the random feature tensors differing, for
test_dense_sage_conv[2-g5-idtype0], [2-g5-idtype1], [2-g6-idtype0],
[2-g6-idtype1], [2-g7-idtype0], [2-g7-idtype1], [2-g8-idtype0], [2-g8-idtype1],
[2-g9-idtype0] and [2-g9-idtype1]; each aborts at out_sage = sage(g, feat) with
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError)
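One way to make the stub tolerate the existing call sites is to give it a signature compatible with the real autocast, accepting and ignoring any keywords. This is only a sketch of that option; whether this PR fixes it this way or by changing the call sites is not shown in this log:

    # Hypothetical signature-compatible fallback, assuming the stub shape above.
    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):
        # Accept and discard `enabled=...` (and anything else the real
        # torch.cuda.amp.autocast would take), then do nothing.
        yield

    with empty_context(enabled=False):  # no longer raises
        pass
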
________________________ test_edge_conv[1-g0-idtype0] _________________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32),
                         'x': Scheme(shape=(5,), dtype=torch.float32)}
          edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int32, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_edge_conv(g, idtype, out_dim):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        edge_conv = nn.EdgeConv(5, out_dim).to(ctx)
        print(edge_conv)

        # test pickle
        th.save(edge_conv, tmp_buffer)

        h0 = F.randn((g.number_of_src_nodes(), 5))
>       h1 = edge_conv(g, h0)

tests\pytorch\test_nn.py:973:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward
    g.apply_edges(fn.v_sub_u('x', 'x', 'theta'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[ 0.0486,  0.3352,  0.4841,  0.1585, -1.2404],
        [-0.2769, -0.2891,  0.2224, -0.4155, -1.2190],
        ...2.1852],
        [-0.0289,  1.6883, -0.8614,  0.4308, -0.3867],
        [-0.1819, -0.4257,  0.6875,  1.6385, -0.6501]])
rhs_data = tensor([[-0.0486, -0.3352, -0.4841, -0.1585,  1.2404],
        [ 0.2769,  0.2891, -0.2224,  0.4155,  1.2190],
        ...2.1852],
        [ 0.0289, -1.6883,  0.8614, -0.4308,  0.3867],
        [ 0.1819,  0.4257, -0.6875, -1.6385,  0.6501]])
lhs_target = 'v', rhs_target = 'u'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
---------------------------- Captured stdout call -----------------------------
EdgeConv(
  (theta): Linear(in_features=5, out_features=1, bias=True)
  (phi): Linear(in_features=5, out_features=1, bias=True)
)
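Before the crash, the gsddmm locals above also show the operator rewriting at work: EdgeConv's fn.v_sub_u(...) enters gsddmm as op='sub', which the dumped source turns into op='add' with a negated right-hand side. That is why the locals report op = 'add' and an rhs_data that is the elementwise negation of the node features. A self-contained illustration of the same algebra (tensor values invented):

    import torch

    def rewrite_op(op, rhs_data):
        # Same rewriting as the gsddmm snippet dumped in the traceback.
        if op == 'sub':               # v_sub_u arrives here as 'sub' ...
            op = 'add'
            rhs_data = -rhs_data      # ... and leaves as 'add' of the negation
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        return op, rhs_data

    x = torch.randn(4, 5)
    op, rhs = rewrite_op('sub', x)
    assert op == 'add' and torch.equal(rhs, -x)
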
(The same failure repeats for test_edge_conv[1-g0-idtype1], [1-g1-idtype0],
[1-g1-idtype1], [1-g2-idtype0], [1-g2-idtype1], [1-g3-idtype0], [1-g3-idtype1],
[1-g4-idtype0], [1-g4-idtype1], [1-g5-idtype0], [1-g5-idtype1], [1-g6-idtype0],
[1-g6-idtype1], [2-g0-idtype0] and [2-g0-idtype1]; each aborts at
h1 = edge_conv(g, h0) with
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError)
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g1-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'x': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 2.6122, 0.6351, -0.6148, -0.3339, 0.5510], [-1.1324, -0.8523, 0.7315, -0.7203, 1.4622], ...0.4787], [-0.0921, -1.3091, 0.8404, 1.8181, -0.7372], [ 0.3272, 1.2601, 0.0613, 0.7541, 1.1493]]) rhs_data = tensor([[-2.6122, -0.6351, 0.6148, 0.3339, -0.5510], [ 1.1324, 0.8523, -0.7315, 0.7203, -1.4622], ...0.4787], [ 0.0921, 1.3091, -0.8404, -1.8181, 0.7372], [-0.3272, -1.2601, -0.0613, -0.7541, -1.1493]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g1-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'x': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.9536, -0.5266, 1.3723, -1.6593, 1.2216], [ 0.2664, -0.2312, 0.7005, -1.5385, -0.7313], ...1.1688], [ 0.4359, 1.1523, -0.8423, 0.2683, -0.5432], [-0.4415, 1.5636, -0.4892, -0.7642, 2.7993]]) rhs_data = tensor([[ 0.9536, 0.5266, -1.3723, 1.6593, -1.2216], [-0.2664, 0.2312, -0.7005, 1.5385, 0.7313], ...1.1688], [-0.4359, -1.1523, 0.8423, -0.2683, 0.5432], [ 0.4415, -1.5636, 0.4892, 0.7642, -2.7993]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g2-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'x': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.6287, -1.1842, -0.4102, 0.5920, 1.0011], [-0.1276, 0.5879, 0.3422, 1.3506, -0.5213], ...0.8062], [-0.6234, -0.4602, 0.4185, 0.7299, -0.7100], [ 1.1459, 0.3243, 1.2374, -0.6434, -0.7038]]) rhs_data = tensor([[ 0.6287, 1.1842, 0.4102, -0.5920, -1.0011], [ 0.1276, -0.5879, -0.3422, -1.3506, 0.5213], ...0.8062], [ 0.6234, 0.4602, -0.4185, -0.7299, 0.7100], [-1.1459, -0.3243, -1.2374, 0.6434, 0.7038]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g2-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'x': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.7821, 0.8230, -0.6369, -1.4772, 0.9777], [ 0.5996, 1.3512, 0.2797, -1.1523, 1.2788], ...0.5372], [-0.1322, 1.6087, -0.7401, 0.5466, 0.9431], [ 0.0514, -2.7608, -0.1321, -1.4494, 0.1496]]) rhs_data = tensor([[-1.7821, -0.8230, 0.6369, 1.4772, -0.9777], [-0.5996, -1.3512, -0.2797, 1.1523, -1.2788], ...0.5372], [ 0.1322, -1.6087, 0.7401, -0.5466, -0.9431], [-0.0514, 2.7608, 0.1321, 1.4494, -0.1496]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g3-idtype0] _________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'x': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.6476, -0.1591, 0.2446, -1.8975, 1.0186], [ 0.2461, -0.0846, -0.5721, -0.8421, -0.7333], ...0.7965], [-0.6208, 0.1667, -0.2236, 0.1140, 1.6281], [ 0.3169, -0.4587, -0.2594, 0.8978, -1.5922]]) rhs_data = tensor([[-0.6476, 0.1591, -0.2446, 1.8975, -1.0186], [-0.2461, 0.0846, 0.5721, 0.8421, 0.7333], ...0.7965], [ 0.6208, -0.1667, 0.2236, -0.1140, -1.6281], [-0.3169, 0.4587, 0.2594, -0.8978, 1.5922]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g3-idtype1] _________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'x': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.4314, 0.1708, -0.0978, 0.5323, 0.3766], [-0.6014, 0.2944, 0.2122, 2.1762, -1.1645], ...0.6536], [-0.3113, 1.5779, -1.2486, 0.2441, 0.6326], [ 0.4450, 0.7251, 0.6062, -2.2069, 0.2868]]) rhs_data = tensor([[ 0.4314, -0.1708, 0.0978, -0.5323, -0.3766], [ 0.6014, -0.2944, -0.2122, -2.1762, 1.1645], ...0.6536], [ 0.3113, -1.5779, 1.2486, -0.2441, -0.6326], [-0.4450, -0.7251, -0.6062, 2.2069, -0.2868]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g4-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'x': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.2836, 0.7614, -1.5352, -1.2427, -2.0061], [ 0.8611, -2.7123, 0.1213, -0.8119, 0.0168], ...0.0256], [ 0.0291, -0.3433, 1.2727, -0.4527, -0.3901], [ 0.3806, 1.8876, -0.2478, -0.9003, 0.4304]]) rhs_data = tensor([[-0.2836, -0.7614, 1.5352, 1.2427, 2.0061], [-0.8611, 2.7123, -0.1213, 0.8119, -0.0168], ...0.0256], [-0.0291, 0.3433, -1.2727, 0.4527, 0.3901], [-0.3806, -1.8876, 0.2478, 0.9003, -0.4304]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g4-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'x': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-1.9751e-01, 2.1486e+00, -3.5153e-01, -1.0911e+00, -8.3551e-01], [ 1.1672e+00, -7.0716e-01, 1.3496e...01, -1.0408e+00, 6.1995e-01, 4.8403e-01], [ 1.0102e+00, -9.4458e-01, 3.0140e-01, 1.3150e-03, 3.1131e-01]]) rhs_data = tensor([[ 1.9751e-01, -2.1486e+00, 3.5153e-01, 1.0911e+00, 8.3551e-01], [-1.1672e+00, 7.0716e-01, -1.3496e...01, 1.0408e+00, -6.1995e-01, -4.8403e-01], [-1.0102e+00, 9.4458e-01, -3.0140e-01, -1.3150e-03, -3.1131e-01]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g5-idtype0] _________________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.3245, 0.9317, 0.9036, -0.6901, -1.0062], [ 0.4788, 0.7190, -0.9677, 0.8543, 0.3720], [ 0.2968, 0.2801, -0.8357, 1.2710, -1.7688]]) rhs_data = tensor([[-0.3245, -0.9317, -0.9036, 0.6901, 1.0062], [-0.4788, -0.7190, 0.9677, -0.8543, -0.3720], ...1.2238], [-1.6393, 1.1642, 1.5238, -0.2397, 0.2302], [-0.8122, -0.1605, 2.1358, 0.5376, -1.1153]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g5-idtype1] _________________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.4703, 1.3877, 0.3466, -1.1047, -0.0428], [-2.0723, 1.2491, -0.8536, 2.2255, -1.0305], [ 0.7438, 0.8698, 1.1144, 2.3243, -1.1786]]) rhs_data = tensor([[-0.4703, -1.3877, -0.3466, 1.1047, 0.0428], [ 2.0723, -1.2491, 0.8536, -2.2255, 1.0305], ...0.1292], [ 0.1026, -0.6543, -0.7291, -0.8119, 1.4093], [-0.2768, -0.9552, 0.5928, -0.2453, -1.2137]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g6-idtype0] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'x': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.7281, 1.3727, -1.5217, 0.8874, -1.5569], [ 1.2091, 0.4248, 0.1020, 1.1541, 0.8557], ...0.8428], [ 3.4457, -0.8480, -0.9980, -1.2621, 0.1704], [-1.0108, 0.6933, -1.0230, -1.2971, 1.0327]]) rhs_data = tensor([[-0.7281, -1.3727, 1.5217, -0.8874, 1.5569], [-1.2091, -0.4248, -0.1020, -1.1541, -0.8557], ...0.8428], [-3.4457, 0.8480, 0.9980, 1.2621, -0.1704], [ 1.0108, -0.6933, 1.0230, 1.2971, -1.0327]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ________________________ test_edge_conv[2-g6-idtype1] _________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'x': Scheme(shape=(5,), dtype=torch.float32)} edata_schemes={}) idtype = torch.int64, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_edge_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() edge_conv = nn.EdgeConv(5, out_dim).to(ctx) print(edge_conv) # test pickle th.save(edge_conv, tmp_buffer) h0 = F.randn((g.number_of_src_nodes(), 5)) > h1 = edge_conv(g, h0) tests\pytorch\test_nn.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward g.apply_edges(fn.v_sub_u('x', 'x', 'theta')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.7974, 0.7729, 1.0826, 0.3204, -1.1722], [ 0.4221, 2.0494, -1.5402, 0.7005, -0.0130], ...0.6546], [-0.9132, -1.2859, 0.9105, 1.4827, 0.7854], [-0.7712, 0.7687, -0.5193, 0.1445, 0.2761]]) rhs_data = tensor([[ 0.7974, -0.7729, -1.0826, -0.3204, 1.1722], [-0.4221, -2.0494, 1.5402, -0.7005, 0.0130], ...0.6546], [ 0.9132, 1.2859, -0.9105, -1.4827, -0.7854], [ 0.7712, -0.7687, 0.5193, -0.1445, -0.2761]]) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
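Every parametrized failure in this run is the same crash: the edge computation is lowered to gsddmm, and python\dgl\backend\pytorch\sparse.py:731 enters `with autocast(enabled=False):` while `autocast` has been aliased to a no-argument fallback context manager on this PyTorch build. A minimal sketch of that mechanism (the fallback shape is inferred from the error message, not copied from the dgl source):

    from contextlib import contextmanager

    @contextmanager
    def empty_context():               # fallback defined with no parameters
        yield

    autocast = empty_context           # alias used when the real amp autocast is unavailable

    with autocast(enabled=False):      # TypeError: empty_context() got an
        pass                           #   unexpected keyword argument 'enabled'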
_______________________ test_edge_conv_bi[1-g0-idtype0] _______________________

g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8},
      metagraph=[('_U', '_V', '_E')])
idtype = torch.int32, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_edge_conv_bi(g, idtype, out_dim):
        g = g.astype(idtype).to(F.ctx())
        ctx = F.ctx()
        edge_conv = nn.EdgeConv(5, out_dim).to(ctx)
        print(edge_conv)
        h0 = F.randn((g.number_of_src_nodes(), 5))
        x0 = F.randn((g.number_of_dst_nodes(), 5))
>       h1 = edge_conv(g, (h0, x0))

tests\pytorch\test_nn.py:986:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\edgeconv.py:171: in forward
    g.apply_edges(fn.v_sub_u('x', 'x', 'theta'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gidx = <...>
op = 'add'
lhs_data = tensor([[-0.4482, -1.0390,  2.3526, -0.3421, -0.8654],
        [ 1.7475, -1.2230,  0.4174,  1.0663, -0.0574],
        [-0.6349, -0.3127, -0.6098,  2.8471,  0.0724],
        [-1.8267,  0.7960, -0.1545,  2.3544,  1.3370]])
rhs_data = tensor([[-0.1172, -1.4900,  0.4269,  0.6653, -0.7449],
        [ 0.2760,  0.4600, -0.4274,  0.2719,  1.2312]])
lhs_target = 'v', rhs_target = 'u'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
---------------------------- Captured stdout call -----------------------------
EdgeConv(
  (theta): Linear(in_features=5, out_features=1, bias=True)
  (phi): Linear(in_features=5, out_features=1, bias=True)
)
_______________________ test_edge_conv_bi[1-g0-idtype1] _______________________
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int64, out_dim = 1
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError
_______________________ test_edge_conv_bi[1-g1-idtype0] _______________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int32, out_dim = 1
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError
_______________________ test_edge_conv_bi[1-g1-idtype1] _______________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int64, out_dim = 1
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError
_______________________ test_edge_conv_bi[2-g0-idtype0] _______________________
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int32, out_dim = 2
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError
_______________________ test_edge_conv_bi[2-g0-idtype1] _______________________
g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')])
idtype = torch.int64, out_dim = 2
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError
_______________________ test_edge_conv_bi[2-g1-idtype0] _______________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int32, out_dim = 2
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError
_______________________ test_edge_conv_bi[2-g1-idtype1] _______________________
g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)
idtype = torch.int64, out_dim = 2
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:731: TypeError
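A side note on the locals captured above: the layer calls fn.v_sub_u, yet every trace shows op = 'add' with rhs_data equal to the elementwise negation of lhs_data. That is gsddmm's own rewriting, visible in the captured source: 'sub' becomes 'add' with a negated right operand, and 'div' becomes 'mul' with a reciprocal. A standalone illustration (rewrite_op is a hypothetical helper name; the branch bodies are copied from the trace):

    import torch

    def rewrite_op(op, rhs_data):
        # the same two branches that open gsddmm in the traces above
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        return op, rhs_data

    op, rhs = rewrite_op('sub', torch.tensor([-0.4482, 1.7475]))
    print(op, rhs)    # -> add tensor([ 0.4482, -1.7475])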
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ---------------------------- Captured stdout call ----------------------------- EdgeConv( (theta): Linear(in_features=5, out_features=2, bias=True) (phi): Linear(in_features=5, out_features=2, bias=True) ) ______________________ test_dotgat_conv[1-1-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.4171]], [[ 0.1313]], [[-0.0037]], [[ 0.3055]], [[-0.8149]], ...054]], [[-0.4450]], [[ 0.1160]], [[ 0.1449]], [[ 0.5042]]], grad_fn=) rhs_data = tensor([[[-0.4171]], [[ 0.1313]], [[-0.0037]], [[ 0.3055]], [[-0.8149]], ...054]], [[-0.4450]], [[ 0.1160]], [[ 0.1449]], [[ 0.5042]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[1-1-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 1.1489]], [[ 0.5041]], [[ 0.0415]], [[-0.1781]], [[-0.3042]], ...948]], [[ 0.0383]], [[-0.6830]], [[ 0.4236]], [[-0.2631]]], grad_fn=) rhs_data = tensor([[[ 1.1489]], [[ 0.5041]], [[ 0.0415]], [[-0.1781]], [[-0.3042]], ...948]], [[ 0.0383]], [[-0.6830]], [[ 0.4236]], [[-0.2631]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g1-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g1-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g2-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g2-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g3-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g3-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g4-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g4-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g5-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g5-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g6-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-1-g6-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g0-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g1-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g1-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g2-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g2-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g3-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'

Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d'
Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'...
Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5'
Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f'
Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g3-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g4-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g4-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g5-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g5-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g6-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[1-2-g6-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[4-1-g0-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests\pytorch\test_nn.py::test_dotgat_conv[4-1-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g1-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 2.5271e-01], [ 7.7926e-02], [-1.6030e-01], [ 2.4801e-01]], [[ 4.8758e-0... [[ 6.9294e-01], [ 1.3202e+00], [-1.4746e+00], [ 2.4003e-02]]], grad_fn=) rhs_data = tensor([[[ 2.5271e-01], [ 7.7926e-02], [-1.6030e-01], [ 2.4801e-01]], [[ 4.8758e-0... [[ 6.9294e-01], [ 1.3202e+00], [-1.4746e+00], [ 2.4003e-02]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g1-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.3748], [ 0.7783], [ 0.1260], [ 0.2537]], [[-0.3613], [-0.277... [ 0.1384]], [[ 0.1680], [ 0.2012], [-0.1219], [-0.4177]]], grad_fn=) rhs_data = tensor([[[ 0.3748], [ 0.7783], [ 0.1260], [ 0.2537]], [[-0.3613], [-0.277... [ 0.1384]], [[ 0.1680], [ 0.2012], [-0.1219], [-0.4177]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g2-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.0588], [-0.9932], [ 0.7851], [ 0.4306]], [[-0.5548], [ 0.921... [-0.1073]], [[ 0.1017], [-0.2846], [-0.1900], [-0.1226]]], grad_fn=) rhs_data = tensor([[[ 0.0588], [-0.9932], [ 0.7851], [ 0.4306]], [[-0.5548], [ 0.921... [-0.1073]], [[ 0.1017], [-0.2846], [-0.1900], [-0.1226]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g2-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.4551], [-0.8097], [ 0.7426], [-0.2365]], [[-0.7178], [ 0.066... [ 0.2029]], [[ 0.0706], [ 0.4135], [-0.6640], [ 0.7454]]], grad_fn=) rhs_data = tensor([[[-0.4551], [-0.8097], [ 0.7426], [-0.2365]], [[-0.7178], [ 0.066... [ 0.2029]], [[ 0.0706], [ 0.4135], [-0.6640], [ 0.7454]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g3-idtype0] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.5471], [-0.0331], [-0.1496], [-0.2486]], [[-0.2693], [-0.449... [ 0.2819]], [[ 0.1578], [ 0.1815], [-0.0084], [-0.0032]]], grad_fn=) rhs_data = tensor([[[ 0.5471], [-0.0331], [-0.1496], [-0.2486]], [[-0.2693], [-0.449... [ 0.2819]], [[ 0.1578], [ 0.1815], [-0.0084], [-0.0032]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g3-idtype1] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-8.0979e-01], [-4.1980e-01], [ 1.5915e+00], [ 8.0311e-01]], [[-1.2550e-0... [[ 5.2418e-01], [ 2.4671e-01], [-4.2148e-01], [-3.8784e-01]]], grad_fn=) rhs_data = tensor([[[-8.0979e-01], [-4.1980e-01], [ 1.5915e+00], [ 8.0311e-01]], [[-1.2550e-0... [[ 5.2418e-01], [ 2.4671e-01], [-4.2148e-01], [-3.8784e-01]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g4-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 2.3130], [ 0.1837], [ 0.8431], [-1.8198]], [[-0.4535], [ 0.331... [-0.7223]], [[ 0.1381], [ 0.7241], [ 0.5618], [ 0.4803]]], grad_fn=) rhs_data = tensor([[[ 2.3130], [ 0.1837], [ 0.8431], [-1.8198]], [[-0.4535], [ 0.331... [-0.7223]], [[ 0.1381], [ 0.7241], [ 0.5618], [ 0.4803]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g4-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.5456], [ 0.0891], [ 0.8867], [-0.5136]], [[ 0.1496], [-0.124... [-0.0805]], [[-0.5656], [-0.2194], [-0.8757], [ 0.3788]]], grad_fn=) rhs_data = tensor([[[ 0.5456], [ 0.0891], [ 0.8867], [-0.5136]], [[ 0.1496], [-0.124... [-0.0805]], [[-0.5656], [-0.2194], [-0.8757], [ 0.3788]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g5-idtype0] _______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.3592], [-0.3527], [-0.2985], [ 0.0539]], [[-1.6622], [ 0.424... [ 0.3472]], [[ 0.2734], [ 0.8207], [ 0.4990], [-0.0491]]], grad_fn=) rhs_data = tensor([[[ 0.3592], [-0.3527], [-0.2985], [ 0.0539]], [[-1.6622], [ 0.424...[ 0.7632]], [[-0.0163], [ 0.0141], [ 0.6946], [ 0.1270]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g5-idtype1] _______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.0602], [ 0.1747], [-0.2630], [-0.1540]], [[ 0.7330], [-1.072... [-0.4085]], [[-0.2021], [-0.8088], [ 0.1552], [-0.3202]]], grad_fn=) rhs_data = tensor([[[ 0.0602], [ 0.1747], [-0.2630], [-0.1540]], [[ 0.7330], [-1.072...[ 0.1855]], [[-0.6858], [-0.0074], [-0.1308], [-0.3257]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g6-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.0898], [-0.0545], [-0.3276], [ 0.4689]], [[-0.0208], [ 0.596... [-0.0067]], [[ 0.4353], [-0.4514], [-0.5834], [ 0.7401]]], grad_fn=) rhs_data = tensor([[[ 0.0898], [-0.0545], [-0.3276], [ 0.4689]], [[-0.0208], [ 0.596... [-0.0067]], [[ 0.4353], [-0.4514], [-0.5834], [ 0.7401]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-1-g6-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.2853], [-0.0968], [-0.1306], [-0.1212]], [[-0.4239], [ 0.325... [-0.1908]], [[ 0.9640], [-0.2568], [ 0.3576], [-0.0646]]], grad_fn=) rhs_data = tensor([[[ 0.2853], [-0.0968], [-0.1306], [-0.1212]], [[-0.4239], [ 0.325... [-0.1908]], [[ 0.9640], [-0.2568], [ 0.3576], [-0.0646]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.4931, -0.3066], [-0.7155, -1.3766], [-0.0161, -0.2475], [-0.3122, -0.5728]], ...2334], [ 0.0299, -0.1628], [-0.0382, -0.4620], [-0.0407, 0.5088]]], grad_fn=) rhs_data = tensor([[[ 0.4931, -0.3066], [-0.7155, -1.3766], [-0.0161, -0.2475], [-0.3122, -0.5728]], ...2334], [ 0.0299, -0.1628], [-0.0382, -0.4620], [-0.0407, 0.5088]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-1.1386e+00, -4.4391e-01], [ 3.7796e-01, 8.4842e-01], [ 3.2564e-01, -2.6513e-01], ...e-01, 9.8109e-02], [-1.7835e-02, -7.3542e-01], [ 6.7590e-01, -1.6140e-01]]], grad_fn=) rhs_data = tensor([[[-1.1386e+00, -4.4391e-01], [ 3.7796e-01, 8.4842e-01], [ 3.2564e-01, -2.6513e-01], ...e-01, 9.8109e-02], [-1.7835e-02, -7.3542e-01], [ 6.7590e-01, -1.6140e-01]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g1-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.1413, 0.0599], [ 0.4125, -0.1218], [ 0.4898, -0.3687], [-0.0029, -0.3604]], ...6279], [ 0.3096, -0.2107], [ 0.4226, -0.1336], [ 0.1092, -0.0199]]], grad_fn=) rhs_data = tensor([[[-0.1413, 0.0599], [ 0.4125, -0.1218], [ 0.4898, -0.3687], [-0.0029, -0.3604]], ...6279], [ 0.3096, -0.2107], [ 0.4226, -0.1336], [ 0.1092, -0.0199]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g1-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.4017, -0.6442], [ 0.2983, -0.1432], [-1.0643, -0.5747], [-0.3928, -0.8110]], ...5875], [ 0.2696, 0.8920], [ 0.4336, -0.6929], [ 0.0164, -0.7280]]], grad_fn=) rhs_data = tensor([[[-0.4017, -0.6442], [ 0.2983, -0.1432], [-1.0643, -0.5747], [-0.3928, -0.8110]], ...5875], [ 0.2696, 0.8920], [ 0.4336, -0.6929], [ 0.0164, -0.7280]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g2-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.3967, -1.1596], [-0.2107, -0.0525], [-0.3646, 0.6873], [ 1.3665, 0.2432]], ...4121], [ 0.6462, -0.2959], [-0.8253, 0.0852], [ 0.5070, 0.5118]]], grad_fn=) rhs_data = tensor([[[ 0.3967, -1.1596], [-0.2107, -0.0525], [-0.3646, 0.6873], [ 1.3665, 0.2432]], ...4121], [ 0.6462, -0.2959], [-0.8253, 0.0852], [ 0.5070, 0.5118]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g2-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 4.6382e-01, 8.1709e-01], [ 4.9443e-01, 3.6309e-01], [-8.6739e-01, -5.9274e-01], ...e-01, -6.8233e-01], [ 8.4753e-01, 7.3353e-01], [-6.8092e-01, -2.1011e-01]]], grad_fn=) rhs_data = tensor([[[ 4.6382e-01, 8.1709e-01], [ 4.9443e-01, 3.6309e-01], [-8.6739e-01, -5.9274e-01], ...e-01, -6.8233e-01], [ 8.4753e-01, 7.3353e-01], [-6.8092e-01, -2.1011e-01]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g3-idtype0] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.4989, -0.9164], [-0.5876, -0.9108], [-1.1572, 0.7104], [-0.5259, -0.4017]], ...1412], [ 0.0400, -0.5273], [-0.9923, 0.7896], [-0.0921, -1.1406]]], grad_fn=) rhs_data = tensor([[[-0.4989, -0.9164], [-0.5876, -0.9108], [-1.1572, 0.7104], [-0.5259, -0.4017]], ...1412], [ 0.0400, -0.5273], [-0.9923, 0.7896], [-0.0921, -1.1406]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g3-idtype1] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.1732, 0.1927], [ 0.0238, 0.0952], [ 0.0835, 0.4412], [-0.4363, -0.3036]], ...2506], [-0.2434, 0.4468], [-0.0999, 0.1084], [ 0.5013, 0.6683]]], grad_fn=) rhs_data = tensor([[[-0.1732, 0.1927], [ 0.0238, 0.0952], [ 0.0835, 0.4412], [-0.4363, -0.3036]], ...2506], [-0.2434, 0.4468], [-0.0999, 0.1084], [ 0.5013, 0.6683]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g4-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.8579, -0.0380], [ 0.6015, -0.5070], [-0.0785, 0.5414], [-1.0351, 0.0215]], ...0254], [-0.0309, 0.5453], [-0.2483, -0.4041], [ 0.3795, -0.6567]]], grad_fn=) rhs_data = tensor([[[ 0.8579, -0.0380], [ 0.6015, -0.5070], [-0.0785, 0.5414], [-1.0351, 0.0215]], ...0254], [-0.0309, 0.5453], [-0.2483, -0.4041], [ 0.3795, -0.6567]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g4-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.0511, 0.0641], [-0.3525, 0.5721], [-0.1648, -1.1612], [-0.0997, -0.0737]], ...6743], [ 0.2523, -0.1218], [-0.1366, -0.3037], [-0.1647, 0.2105]]], grad_fn=) rhs_data = tensor([[[ 0.0511, 0.0641], [-0.3525, 0.5721], [-0.1648, -1.1612], [-0.0997, -0.0737]], ...6743], [ 0.2523, -0.1218], [-0.1366, -0.3037], [-0.1647, 0.2105]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g5-idtype0] _______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.8223, 0.1636], [-0.1698, 0.9940], [ 0.7054, 0.1483], [ 0.2376, 0.9035]], ...1099], [-0.0374, -0.2017], [ 0.0092, 0.2701], [ 0.2782, 0.1475]]], grad_fn=) rhs_data = tensor([[[ 0.8223, 0.1636], [-0.1698, 0.9940], [ 0.7054, 0.1483], [ 0.2376, 0.9035]], ...781], [-0.9772, 1.0206], [ 0.4823, 1.7351], [-0.4486, 0.4658]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g5-idtype1] _______________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.0454, -0.6071], [-0.0934, -0.2434], [ 0.7391, 1.0351], [-0.3213, -1.2373]], ...0815], [ 0.2930, -0.0952], [-0.4833, -0.0196], [ 0.0548, 0.3162]]], grad_fn=) rhs_data = tensor([[[-0.0454, -0.6071], [-0.0934, -0.2434], [ 0.7391, 1.0351], [-0.3213, -1.2373]], ...286], [ 0.2600, -0.2596], [ 0.3131, 0.3927], [ 0.4486, -0.2778]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g6-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.1223, -0.0246], [-0.2674, 0.6928], [-0.0086, 0.2498], [-0.3322, 0.2452]], ...2817], [-0.0403, -0.7375], [ 0.3018, 0.1425], [ 0.7630, 0.1669]]], grad_fn=) rhs_data = tensor([[[-0.1223, -0.0246], [-0.2674, 0.6928], [-0.0086, 0.2498], [-0.3322, 0.2452]], ...2817], [-0.0403, -0.7375], [ 0.3018, 0.1425], [ 0.7630, 0.1669]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ______________________ test_dotgat_conv[4-2-g6-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv(5, out_dim, num_heads) feat = F.randn((g.number_of_src_nodes(), 5)) dotgat = dotgat.to(ctx) # test pickle th.save(dotgat, tmp_buffer) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1003: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.0693, 1.3967], [-0.3664, 0.3257], [-0.1604, -0.1506], [-1.0128, 0.5001]], ...2718], [ 0.1667, 0.1350], [-0.5472, -0.0505], [-0.0309, 0.5017]]], grad_fn=) rhs_data = tensor([[[ 0.0693, 1.3967], [-0.3664, 0.3257], [-0.1604, -0.1506], [-1.0128, 0.5001]], ...2718], [ 0.1667, 0.1350], [-0.5472, -0.0505], [-0.0309, 0.5017]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[1-1-g0-idtype0] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.9694]], [[ 0.5359]]], grad_fn=) rhs_data = tensor([[[-0.5836]], [[ 0.7600]], [[-0.4577]], [[-0.2557]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[1-1-g0-idtype1] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.8335]], [[ 0.1975]]], grad_fn=) rhs_data = tensor([[[-0.3493]], [[-0.3559]], [[ 0.2142]], [[-0.5639]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[1-1-g1-idtype0] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.5240]], [[ 1.3672]], [[ 0.8047]], [[-0.0724]], [[-0.4126]], [[-0.6942]]], grad_fn=) rhs_data = tensor([[[0.2445]], [[0.4472]], [[0.9522]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[1-1-g1-idtype1] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 1, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.5805]], [[-0.4286]], [[ 0.6778]], [[ 0.1553]], [[ 0.0926]], [[ 0.3704]]], grad_fn=) rhs_data = tensor([[[ 0.8152]], [[-0.3201]], [[-0.6047]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[1-2-g0-idtype0] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 2, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.9265, -0.1147]], [[-0.7990, 0.3471]]], grad_fn=) rhs_data = tensor([[[-0.6096, 0.4431]], [[ 0.4772, -0.1395]], [[-0.7384, 0.5005]], [[ 0.1076, -0.6642]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[1-2-g0-idtype1] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 2, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.1724, -0.4161]], [[ 0.5570, -0.0535]]], grad_fn=) rhs_data = tensor([[[-0.2410, 1.0052]], [[ 0.1189, 0.0172]], [[ 0.0714, -0.1548]], [[-0.7236, -0.0960]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[1-2-g1-idtype0] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 2, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.2222, 0.4907]], [[-0.6510, 0.5893]], [[-0.1243, -0.0870]], [[-1.3795, 1.1179]], [[ 1.1190, -0.7951]], [[-0.9992, 0.6057]]], grad_fn=) rhs_data = tensor([[[-0.0155, 0.0023]], [[ 0.2366, -0.0762]], [[ 0.1407, -0.3928]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[1-2-g1-idtype1] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 2, num_heads = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.4264, 0.1563]], [[-0.4641, 1.5523]], [[-0.5701, -0.0172]], [[-0.6126, 0.7367]], [[ 0.1619, 0.2153]], [[ 1.2304, -0.9524]]], grad_fn=) rhs_data = tensor([[[ 0.0657, 0.1810]], [[-0.2508, 0.4212]], [[-0.0944, 0.0794]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[4-1-g0-idtype0] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.4830], [-0.1613], [ 0.1324], [ 0.3634]], [[ 0.5974], [ 0.1193], [-0.0037], [-0.1589]]], grad_fn=) rhs_data = tensor([[[-0.4929], [ 1.2497], [ 1.1664], [ 0.9875]], [[-0.6167], [ 0.577... [ 0.5114]], [[-1.1181], [ 0.7334], [ 0.7711], [ 0.9342]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[4-1-g0-idtype1] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[0.6500], [0.1414], [0.1042], [0.8920]], [[0.3820], [0.7157], [0.7959], [0.3495]]], grad_fn=) rhs_data = tensor([[[-0.1814], [ 0.2764], [-0.3175], [-0.2684]], [[-0.3241], [ 0.849... [-0.2943]], [[-0.0895], [-0.3085], [ 0.3221], [ 0.0168]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[4-1-g1-idtype0] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.4656], [-0.6085], [-0.0667], [-0.5637]], [[-0.0830], [-0.151... [ 0.6698]], [[-0.3442], [ 0.2146], [ 0.7411], [-0.5289]]], grad_fn=) rhs_data = tensor([[[ 0.4106], [-0.9586], [-1.4979], [-0.7013]], [[ 0.7881], [-0.826... [-0.1119]], [[-0.8860], [ 0.8015], [-0.0748], [ 0.3975]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[4-1-g1-idtype1] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 1, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.0849], [ 0.3540], [ 0.0689], [ 0.0529]], [[-0.3363], [-0.627... [ 0.4526]], [[-0.6140], [ 0.5132], [ 1.0290], [ 0.6017]]], grad_fn=) rhs_data = tensor([[[ 0.8828], [-0.1587], [ 0.7704], [-0.5350]], [[ 0.1671], [ 0.111... [-0.5320]], [[ 0.1875], [ 0.0708], [-0.0523], [ 0.1025]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[4-2-g0-idtype0] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.0498, 0.5550], [ 0.2347, 0.6748], [ 0.2003, 0.4885], [ 0.0236, 0.2013]], ...2404], [-0.6822, -0.3418], [-0.4372, 0.5612], [ 0.5821, -0.4192]]], grad_fn=) rhs_data = tensor([[[-0.3013, 0.6716], [-0.8220, 0.9957], [-1.1052, -0.6140], [-0.6755, -0.5250]], ...3560], [-0.2628, -0.8790], [ 0.3457, 0.1349], [-0.1346, 0.4277]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[4-2-g0-idtype1] _____________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[ 0.2981, 0.2188], [-0.2406, -0.8796], [ 0.2949, -0.0015], [-0.3675, -0.2658]], ...4028], [ 0.3603, 1.1409], [-1.1468, -0.0301], [ 0.8810, -0.4608]]], grad_fn=) rhs_data = tensor([[[ 0.2881, -0.2651], [ 0.2372, -0.0791], [ 0.1272, 0.1410], [-0.1721, -0.0077]], ...1961], [-0.1210, 0.2333], [-0.4226, 0.1699], [-0.7182, 0.5729]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[4-2-g1-idtype0] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.8512, -0.3414], [ 1.5003, -0.6760], [-0.4036, 0.1045], [ 0.3344, -1.4628]], ...1348], [ 1.0049, -0.2356], [-0.5077, 0.2977], [ 0.4937, -1.0519]]], grad_fn=) rhs_data = tensor([[[ 0.6201, -0.2883], [-0.3675, -0.0990], [ 0.1445, -0.1637], [-0.1185, 0.0800]], ...3691], [-0.5112, -0.2334], [ 0.2504, -0.2135], [-0.2540, 0.1269]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _____________________ test_dotgat_conv_bi[4-2-g1-idtype1] _____________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 2, num_heads = 4 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) @pytest.mark.parametrize('num_heads', [1, 4]) def test_dotgat_conv_bi(g, idtype, out_dim, num_heads): g = g.astype(idtype).to(F.ctx()) ctx = F.ctx() dotgat = nn.DotGatConv((5, 5), out_dim, num_heads) feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5))) dotgat = dotgat.to(ctx) > h = dotgat(g, feat) tests\pytorch\test_nn.py:1018: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\dotgatconv.py:205: in forward graph.apply_edges(fn.u_dot_v('ft', 'ft', 'a')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[-0.3785, 0.4268], [ 0.1302, 0.3379], [ 0.1440, -0.0128], [-0.1614, -0.0936]], ...9188], [-0.4152, -0.5345], [ 0.0572, 0.5560], [-0.8934, 0.4026]]], grad_fn=) rhs_data = tensor([[[-0.6983, 0.6638], [-1.0231, -0.5278], [-0.7298, 0.1425], [-0.4834, -0.4650]], ...0780], [-0.1142, -0.2039], [ 0.0024, 0.4723], [ 0.2392, -0.2105]]], grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError ___________________________ test_dense_cheb_conv[1] ___________________________ out_dim = 1 @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_cheb_conv(out_dim): for k in range(1, 4): ctx = F.ctx() g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True) g = g.to(F.ctx()) adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() cheb = nn.ChebConv(5, out_dim, k, None) dense_cheb = nn.DenseChebConv(5, out_dim, k) #for i in range(len(cheb.fc)): # dense_cheb.W.data[i] = cheb.fc[i].weight.data.t() dense_cheb.W.data = cheb.linear.weight.data.transpose(-1, -2).view(k, 5, out_dim) if cheb.linear.bias is not None: dense_cheb.bias.data = cheb.linear.bias.data feat = F.randn((100, 5)) cheb = cheb.to(ctx) dense_cheb = dense_cheb.to(ctx) > out_cheb = cheb(g, feat, [2.0]) tests\pytorch\test_nn.py:1040: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\chebconv.py:130: in forward h = unnLaplacian(X_0, D_invsqrt, graph) python\dgl\nn\pytorch\conv\chebconv.py:103: in unnLaplacian graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5630, 0.4032, 0.3565, -0.0388, -0.0166], [ 0.0446, -0.0604, 0.1367, -0.0020, -0.0208], ...0.3573], [-0.1222, -0.3434, -0.2911, 0.1425, -0.6669], [-0.0159, -0.0764, -0.2173, -0.0154, 0.0497]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ---------------------------- Captured stdout call ----------------------------- 1 tensor([[-0.1483], [ 0.5646], [-0.1799], [ 0.4515], [ 0.4828], [ 0.1645], [-0.2216], [ 0.2288], [-0.0157], [-0.3422], [ 0.2072], [ 0.3209], [ 0.3846], [ 0.2681], [-0.0924], [-0.2921], [ 0.4173], [ 0.4343], [ 0.1131], [-0.0686], [ 0.4608], [ 0.3049], [ 0.2280], [ 0.5373], [ 0.2868], [ 0.3676], [ 0.3435], [ 0.1384], [ 0.3128], [-0.0893], [ 0.3390], [-0.1982], [ 0.6095], [-0.4951], [ 0.3317], [-0.1628], [-0.0403], [ 0.4532], [-0.1243], [ 0.0797], [ 0.4808], [-0.0392], [ 0.0935], [ 0.1351], [ 0.0286], [ 0.0858], [ 0.2192], [ 0.4419], [ 0.3408], [ 0.5475], [ 0.0143], [ 0.1049], [ 0.5359], [ 0.1154], [ 0.2162], [-0.0771], [ 0.5497], [ 0.1472], [ 0.4032], [ 0.6115], [ 0.1225], [-0.2362], [-0.1780], [-0.0076], [ 0.0823], [-0.1912], [ 0.1414], [-0.1802], [ 0.2309], [ 0.5352], [ 0.3177], [-0.1316], [ 0.3189], [ 0.1216], [ 0.2709], [-0.1656], [ 0.0413], [ 0.0174], [ 0.3980], [ 0.5580], [ 0.3642], [ 0.1090], [ 0.2172], [ 0.2753], [ 0.6316], [-0.3075], [ 0.4241], [ 0.4387], [ 0.2734], [ 0.4583], [-0.0487], [ 0.5076], [ 0.3659], [ 0.5654], [-0.3401], [ 0.3943], [-0.0047], [ 0.4762], [-0.0104], [-0.0068]], grad_fn=) tensor([[-0.1483], [ 0.5646], [-0.1799], [ 0.4515], [ 0.4828], [ 0.1645], [-0.2216], [ 0.2288], [-0.0157], [-0.3422], [ 0.2072], [ 0.3209], [ 0.3846], [ 0.2681], [-0.0924], [-0.2921], [ 0.4173], [ 0.4343], [ 0.1131], [-0.0686], [ 0.4608], [ 0.3049], [ 0.2280], [ 0.5373], [ 0.2868], [ 0.3676], [ 0.3435], [ 0.1384], [ 0.3128], [-0.0893], [ 0.3390], [-0.1982], [ 0.6095], [-0.4951], [ 0.3317], [-0.1628], [-0.0403], [ 0.4532], [-0.1243], [ 0.0797], [ 0.4808], [-0.0392], [ 0.0935], [ 0.1351], [ 0.0286], [ 0.0858], [ 0.2192], [ 0.4419], [ 0.3408], [ 0.5475], [ 0.0143], [ 0.1049], [ 0.5359], [ 0.1154], [ 0.2162], [-0.0771], [ 0.5497], [ 0.1472], [ 0.4032], [ 0.6115], [ 0.1225], [-0.2362], [-0.1780], [-0.0076], [ 0.0823], [-0.1912], [ 0.1414], [-0.1802], [ 0.2309], [ 0.5352], [ 0.3177], [-0.1316], [ 0.3189], [ 0.1216], [ 0.2709], [-0.1656], [ 0.0413], [ 0.0174], [ 0.3980], [ 0.5580], [ 0.3642], [ 0.1090], [ 0.2172], [ 0.2753], [ 0.6316], [-0.3075], [ 0.4241], [ 0.4387], [ 0.2734], [ 0.4583], [-0.0487], [ 0.5076], [ 0.3659], [ 0.5654], [-0.3401], [ 0.3943], [-0.0047], [ 0.4762], [-0.0104], [-0.0068]], grad_fn=) ___________________________ test_dense_cheb_conv[2] ___________________________ out_dim = 2 @pytest.mark.parametrize('out_dim', [1, 2]) def test_dense_cheb_conv(out_dim): for k in range(1, 4): ctx = F.ctx() g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True) g = g.to(F.ctx()) adj = g.adjacency_matrix(transpose=True, ctx=ctx).to_dense() cheb = nn.ChebConv(5, out_dim, k, None) dense_cheb = nn.DenseChebConv(5, out_dim, k) #for i in range(len(cheb.fc)): # dense_cheb.W.data[i] = cheb.fc[i].weight.data.t() dense_cheb.W.data = cheb.linear.weight.data.transpose(-1, -2).view(k, 5, out_dim) if cheb.linear.bias is not None: dense_cheb.bias.data = cheb.linear.bias.data feat = F.randn((100, 5)) cheb = cheb.to(ctx) dense_cheb = dense_cheb.to(ctx) > out_cheb = cheb(g, feat, [2.0]) tests\pytorch\test_nn.py:1040: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in 
_call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\chebconv.py:130: in forward h = unnLaplacian(X_0, D_invsqrt, graph) python\dgl\nn\pytorch\conv\chebconv.py:103: in unnLaplacian graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-2.2905e-01, 6.4358e-02, 2.5134e-01, -1.3856e-01, -1.7534e-01], [-3.5620e-02, 3.4000e-01, -2.6566e...01, -2.8050e-02, 4.4176e-02, 2.9358e-01], [-2.9128e-01, -3.2529e-01, -3.4963e-01, 7.1537e-01, 7.6390e-01]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ---------------------------- Captured stdout call ----------------------------- 1 tensor([[-0.2465, -0.6337], [ 0.7033, 0.0666], [ 0.1377, -0.7721], [-0.0125, -0.6798], [-0.0136, -0.8350], [ 0.5796, 0.6321], [ 0.6563, -0.1389], [ 0.8157, 0.5572], [ 0.5124, -0.7402], [ 0.0373, -0.4913], [ 0.0628, -0.2320], [-0.1307, -0.1633], [ 0.3715, -0.1237], [-0.2811, -0.9184], [ 0.5474, -0.2947], [ 0.1764, 0.4816], [ 0.9328, 0.7598], [ 0.8769, 0.6227], [ 0.1643, -0.1913], [-0.1127, 0.0972], [ 0.4026, -0.2674], [ 0.6276, 0.1105], [ 0.5037, -0.2416], [ 0.4262, 0.6545], [ 0.8236, 0.3003], [ 0.1003, -0.4666], [ 0.2677, -0.3673], [ 0.5369, 0.2568], [ 0.6343, 0.3808], [ 0.3723, -0.5850], [ 0.7706, 0.4047], [ 0.8998, 1.4726], [ 0.3942, 0.2480], [ 0.7651, 0.2438], [-0.1149, -0.6694], [ 0.1618, -0.1905], [ 0.0557, -0.6954], [ 0.1738, 0.3303], [ 0.4794, 1.0118], [ 0.4832, 0.3378], [ 0.3867, 0.3986], [-0.1022, -1.6046], [ 0.4078, 0.2784], [ 0.0392, -0.8187], [ 0.1138, -0.7104], [-0.7036, -1.4081], [ 0.6625, 1.5324], [ 0.8624, 1.3113], [ 0.3953, -0.5044], [ 0.6495, 0.1119], [ 0.6778, 0.5930], [ 0.6782, 0.2112], [ 0.3920, 1.0177], [ 0.0276, -0.6887], [ 0.6563, 0.9138], [ 0.4774, 0.0135], [ 0.0406, -0.3739], [ 0.3774, 0.9451], [ 1.2535, 1.4316], [ 0.8844, 0.0901], [ 0.3512, 0.3770], [ 0.3120, -0.2171], [-0.3557, -0.8810], [ 0.4348, -0.9008], [ 0.3494, 0.7059], [ 1.3247, 1.3000], [ 0.5807, -0.3567], [ 0.6728, 0.5278], [ 0.5968, 0.4298], [ 0.5961, -0.0569], [ 0.8899, 1.0007], [-0.1936, -0.6426], [-0.0083, 0.0156], [ 1.1627, 0.5378], [ 0.8846, 1.5960], [ 0.4053, -0.2602], [-0.0170, 0.3704], [ 0.3956, 0.8193], [ 0.6795, 1.0290], [ 1.1320, 0.9030], [ 0.4816, 0.0789], [ 0.3430, 0.3855], [ 0.5907, 1.1944], [ 0.8586, 0.5325], [ 0.1185, 0.7967], [ 0.5796, -0.4615], [ 0.5472, 0.4449], [ 0.6968, 0.3300], [ 0.2125, 0.5799], [ 1.3935, 1.1025], [-0.2531, -0.2949], [ 1.4429, 2.4493], [-0.0854, -0.4195], [ 1.0228, 0.9628], [ 0.2092, -0.4127], [ 0.0841, 0.3869], [ 0.7690, 0.5580], [ 0.4555, -0.5889], [ 0.4778, 0.1505], [ 0.1846, -0.1337]], grad_fn=) tensor([[-0.2465, -0.6337], [ 0.7033, 0.0666], [ 0.1377, -0.7721], [-0.0125, -0.6798], [-0.0136, 
-0.8350], [ 0.5796, 0.6321], [ 0.6563, -0.1389], [ 0.8157, 0.5572], [ 0.5124, -0.7402], [ 0.0373, -0.4913], [ 0.0628, -0.2320], [-0.1307, -0.1633], [ 0.3715, -0.1237], [-0.2811, -0.9184], [ 0.5474, -0.2947], [ 0.1764, 0.4816], [ 0.9328, 0.7598], [ 0.8769, 0.6227], [ 0.1643, -0.1913], [-0.1127, 0.0972], [ 0.4026, -0.2674], [ 0.6276, 0.1105], [ 0.5037, -0.2416], [ 0.4262, 0.6545], [ 0.8236, 0.3003], [ 0.1003, -0.4666], [ 0.2677, -0.3673], [ 0.5369, 0.2568], [ 0.6343, 0.3808], [ 0.3723, -0.5850], [ 0.7706, 0.4047], [ 0.8998, 1.4726], [ 0.3942, 0.2480], [ 0.7651, 0.2438], [-0.1149, -0.6694], [ 0.1618, -0.1905], [ 0.0557, -0.6954], [ 0.1738, 0.3303], [ 0.4794, 1.0118], [ 0.4832, 0.3378], [ 0.3867, 0.3986], [-0.1022, -1.6046], [ 0.4078, 0.2784], [ 0.0392, -0.8187], [ 0.1138, -0.7104], [-0.7036, -1.4081], [ 0.6625, 1.5324], [ 0.8624, 1.3113], [ 0.3953, -0.5044], [ 0.6495, 0.1119], [ 0.6778, 0.5930], [ 0.6782, 0.2112], [ 0.3920, 1.0177], [ 0.0276, -0.6887], [ 0.6563, 0.9138], [ 0.4774, 0.0135], [ 0.0406, -0.3739], [ 0.3774, 0.9451], [ 1.2535, 1.4316], [ 0.8844, 0.0901], [ 0.3512, 0.3770], [ 0.3120, -0.2171], [-0.3557, -0.8810], [ 0.4348, -0.9008], [ 0.3494, 0.7059], [ 1.3247, 1.3000], [ 0.5807, -0.3567], [ 0.6728, 0.5278], [ 0.5968, 0.4298], [ 0.5961, -0.0569], [ 0.8899, 1.0007], [-0.1936, -0.6426], [-0.0083, 0.0156], [ 1.1627, 0.5378], [ 0.8846, 1.5960], [ 0.4053, -0.2602], [-0.0170, 0.3704], [ 0.3956, 0.8193], [ 0.6795, 1.0290], [ 1.1320, 0.9030], [ 0.4816, 0.0789], [ 0.3430, 0.3855], [ 0.5907, 1.1944], [ 0.8586, 0.5325], [ 0.1185, 0.7967], [ 0.5796, -0.4615], [ 0.5472, 0.4449], [ 0.6968, 0.3300], [ 0.2125, 0.5799], [ 1.3935, 1.1025], [-0.2531, -0.2949], [ 1.4429, 2.4493], [-0.0854, -0.4195], [ 1.0228, 0.9628], [ 0.2092, -0.4127], [ 0.0841, 0.3869], [ 0.7690, 0.5580], [ 0.4555, -0.5889], [ 0.4778, 0.1505], [ 0.1846, -0.1337]], grad_fn=) _______________________________ test_sequential _______________________________ def test_sequential(): ctx = F.ctx() # Test single graph class ExampleLayer(th.nn.Module): def __init__(self): super().__init__() def forward(self, graph, n_feat, e_feat): graph = graph.local_var() graph.ndata['h'] = n_feat graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) n_feat += graph.ndata['h'] graph.apply_edges(fn.u_add_v('h', 'h', 'e')) e_feat += graph.edata['e'] return n_feat, e_feat g = dgl.DGLGraph() g.add_nodes(3) g.add_edges([0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2]) g = g.to(F.ctx()) net = nn.Sequential(ExampleLayer(), ExampleLayer(), ExampleLayer()) n_feat = F.randn((3, 4)) e_feat = F.randn((9, 4)) net = net.to(ctx) > n_feat, e_feat = net(g, n_feat, e_feat) tests\pytorch\test_nn.py:1069: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:221: in forward feats = module(graph, *feats) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1055: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) 
python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7821, 1.0798, 0.4059, 0.4878], [ 0.2633, -0.6495, 0.9429, 0.3172], [ 1.2196, -1.3653, 0.2878, 0.1060]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g0-idtype0] __________________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-2.1406, 0.1408], [-0.3480, -0.1269]], grad_fn=) rhs_data = tensor([[-0.0792, 0.0550], [-0.0116, 0.3700], [-0.1034, -0.0047], [ 0.0517, 0.4648], ... 0.0475], [-0.0846, 0.1412], [-0.1397, 0.0826], [-0.0326, 0.2453]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g0-idtype1] __________________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 1.1229, -0.9106], [ 0.3747, -0.9606]], grad_fn=) rhs_data = tensor([[ 0.0743, -0.0270], [ 0.1241, 0.0067], [-0.1070, -0.4083], [ 0.2114, 0.0599], ... -0.1385], [ 0.0028, -0.1408], [-0.1319, -0.2701], [-0.1204, -0.4106]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g1-idtype0] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hv': Scheme(shap...32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), 'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-1.3516, 0.1996], [-0.6192, 0.4839], [-0.1795, 0.6388], [-0.3852, 0.5561], ... 0.4915], [-0.2954, 0.6594], [-0.4244, 0.5406], [-0.5093, 0.6077]], grad_fn=) rhs_data = tensor([[0.1160, 0.5277], [0.2252, 0.5788], [0.0765, 0.2678], [0.1610, 0.5306], [0.122...0.0812, 0.2122], [0.1763, 0.4388], [0.2475, 0.6353], [0.1036, 0.3019]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g1-idtype1] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hv': Scheme(shap...32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), 'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.4853, -0.4872], [-0.5734, -0.5029], [-0.7382, -0.4998], [-0.8256, -0.4660], ...-0.4933], [ 1.7662, -0.5391], [-0.9096, -0.4787], [-0.0372, -0.5028]], grad_fn=) rhs_data = tensor([[ 0.1204, 0.1568], [ 0.0907, -0.1325], [ 0.1201, 0.1643], [ 0.0878, -0.1580], ... -0.0334], [ 0.1355, 0.3316], [ 0.1002, -0.0463], [ 0.0871, -0.1614]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
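For reading the (long) failure matrix: each test ID encodes the parametrization as [out_dim-gN-idtypeM], so test_cf_conv[1-g1-idtype0] is out_dim=1, graph case g1, idtype0 = torch.int32, and idtype1 = torch.int64, as the bound parameters printed in each failure header confirm. The graph cases come from get_cases(['homo', 'bipartite'], exclude=['zero-degree']) and span plain homogeneous graphs, a bipartite graph, and Block objects.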
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g2-idtype0] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.4129, 0.2770], [-0.3130, 1.1279], [-0.3422, 1.3653], [-0.1728, 0.5021], ... 1.7256], [-0.2666, 2.1003], [ 0.1972, -0.0280], [-0.2829, -0.8954]], grad_fn=) rhs_data = tensor([[-0.2630, -0.2328], [-0.3101, -0.2241], [-0.3978, -0.2103], [-0.4193, -0.2163], ... -0.2444], [-0.4194, -0.2164], [-0.2891, -0.2358], [-0.3747, -0.2184]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026'
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g2-idtype1] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.5189, 0.3301], [-0.3752, -1.0341], [ 0.1102, -1.3705], [ 1.4589, 0.3245], ...-0.2583], [ 0.4323, 0.0608], [ 1.1519, 0.1163], [ 1.0316, -0.9775]], grad_fn=) rhs_data = tensor([[-0.1110, -0.1610], [ 0.0553, -0.2688], [-0.1121, -0.1618], [-0.2924, -0.0733], ... -0.2423], [-0.2189, -0.1163], [-0.0722, -0.1944], [-0.2043, -0.1133]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g3-idtype0] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hv': Scheme(shap...
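The gspmm body echoed in every traceback also shows how DGL canonicalizes binary operators before dispatching to the fused SpMM kernel: 'sub' is rewritten as 'add' on a negated right operand, and 'div' as 'mul' on its reciprocal, so the backend only needs add/mul kernels plus the chosen reduce. A worked sketch of that rewrite on plain tensors (the function name here is illustrative):

    import torch

    def canonicalize(op, rhs_data):
        # Mirror of the rewrite shown in gspmm: reduce sub/div to add/mul.
        if op == 'sub':
            return 'add', -rhs_data        # u - e == u + (-e)
        if op == 'div':
            return 'mul', 1. / rhs_data    # u / e == u * (1 / e)
        return op, rhs_data

    u = torch.tensor([6.0, 8.0])
    e = torch.tensor([2.0, 4.0])
    op, e2 = canonicalize('div', e)
    assert op == 'mul'
    assert torch.allclose(u * e2, u / e)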
edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.1345, -0.1930], [ 0.4006, -0.3208], [-0.5630, 0.1903], [-0.3892, 0.1667], ... 0.2174], [ 0.3060, -0.8140], [ 0.0836, -0.1974], [-0.0428, 0.1000]], grad_fn=) rhs_data = tensor([[ 0.3497, -0.0621], [ 0.3217, -0.0380], [ 0.2832, -0.1647], [ 0.2790, -0.0421], ... -0.1822], [ 0.2879, -0.0920], [ 0.2708, 0.0134], [ 0.3551, -0.1914]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g3-idtype1] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hv': Scheme(shap... 
edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.1889, -0.2599], [ 0.0762, -0.3780], [-0.0091, -1.2315], [ 0.2648, -0.9686], ...-0.9417], [-0.2435, -0.5967], [-0.3035, -0.1244], [-0.3837, -0.3527]], grad_fn=) rhs_data = tensor([[-0.1891, -0.2623], [-0.2137, -0.2967], [-0.1221, -0.2329], [-0.0936, -0.1789], ... -0.2179], [-0.0493, -0.1008], [-0.0831, -0.1617], [-0.1045, -0.1982]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g4-idtype0] __________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 6.4941e-01, 2.6193e-01], [ 1.7130e-01, -1.8927e-01], [ 4.3637e-01, -4.7804e-01], [ ...42e-01, -1.3411e-03], [ 5.1762e-03, -8.2245e-01], [-8.4013e-02, -1.2175e+00]], grad_fn=) rhs_data = tensor([[ 0.1037, -0.1837], [-0.0162, -0.0123], [ 0.0193, -0.0827], [ 0.0224, -0.0707], ... -0.1169], [ 0.0114, -0.0611], [ 0.0378, -0.1025], [ 0.0290, -0.0979]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g4-idtype1] __________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.3384, -0.0174], [-1.5532, -0.8653], [ 0.4789, 0.4795], [-2.1891, 1.3919], ...-1.3279], [ 1.0438, -1.4701], [-0.6542, 1.1595], [-2.2503, 0.0839]], grad_fn=) rhs_data = tensor([[ 0.2910, -0.0387], [ 0.2101, -0.0203], [ 0.2307, -0.0703], [ 0.1284, -0.0570], ... -0.0212], [ 0.1311, -0.0133], [ 0.1838, -0.0766], [ 0.4051, 0.0837]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g5-idtype0] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 1.1695, 0.9294], [ 0.0270, 0.1165], [ 0.5317, 0.6153], [ 1.1138, 0.9625], ...-0.2283], [ 1.2296, 1.2729], [ 0.6608, 0.9211], [ 0.8738, 0.5206]], grad_fn=) rhs_data = tensor([[-0.0205, -0.0561], [ 0.0805, 0.0736], [ 0.1955, 0.1686], [-0.1237, -0.0482], ... 0.1129], [ 0.0796, 0.0208], [-0.0708, -0.0053], [ 0.0764, 0.0372]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
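Just before the failing line, gspmm routes its arguments through _cast_if_autocast_enabled and only then enters autocast(enabled=False). A plausible reading of that pattern (hypothetical sketch, not DGL's verbatim helper): when an autocast region is active, cast the floating-point tensors to the autocast dtype once up front, then run the custom kernel with autocast disabled so its internals are not re-cast:

    import torch

    def _cast_if_autocast_enabled(*args):
        # Hypothetical sketch of the pattern only; DGL's helper may differ.
        if torch.is_autocast_enabled():
            dtype = torch.get_autocast_gpu_dtype()
            return tuple(
                a.to(dtype) if torch.is_tensor(a) and a.is_floating_point() else a
                for a in args
            )
        return args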
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g5-idtype1] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.5434, -0.5901], [ 0.5241, -0.5419], [ 0.3922, -0.3747], [ 0.2414, -0.2652], ...-0.6070], [ 0.7640, -0.8502], [ 0.5522, -0.5358], [-0.0091, 0.0380]], grad_fn=) rhs_data = tensor([[-0.1240, -0.0303], [-0.1103, -0.1254], [-0.1058, -0.1557], [-0.0941, -0.1944], ... -0.2634], [-0.0852, -0.2465], [-0.1101, -0.1272], [-0.0804, -0.2778]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g6-idtype0] __________________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32 out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.1677, 0.4548], [-0.4094, -0.4531], [ 0.2504, 0.6026], [ 0.0457, 0.2744], [-0.0430, 0.1149], [ 0.0645, 0.3395]], grad_fn=) rhs_data = tensor([[ 0.4264, -0.1747], [ 0.4726, -0.2277], [ 0.4874, -0.2363]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
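Since every call chain bottoms out in the same u_mul_e/sum message passing that CFConv.forward issues at cfconv.py:141, the failure can be reproduced without CFConv at all. A minimal standalone trigger on a toy graph (hypothetical sizes) that exercises the same gspmm path:

    import torch
    import dgl
    import dgl.function as fn

    g = dgl.graph(([0, 1, 1], [1, 2, 2]))  # 3 nodes, 3 edges
    g.ndata['hv'] = torch.randn(3, 2, requires_grad=True)
    g.edata['he'] = torch.randn(3, 2, requires_grad=True)
    # Same call as cfconv.py:141; on an affected PyTorch build this raises
    # TypeError: empty_context() got an unexpected keyword argument 'enabled'
    g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h'))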
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g6-idtype1] __________________________ g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64 out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.5529, -1.0355], [-1.3675, 0.4194], [ 0.1287, -0.8793], [-0.1844, -0.4283], [ 0.8775, -1.2841], [ 0.3139, -0.8368]], grad_fn=) rhs_data = tensor([[ 0.1153, -0.3116], [ 0.2156, -0.2842], [ 0.2108, -0.2907]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g7-idtype0] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.7240, -1.0462], [ 0.3820, -0.6722], [ 0.6814, -1.0605], [ 0.5390, -0.8234], ...-0.4344], [ 0.2305, -0.4538], [ 0.1931, -0.5229], [ 0.1417, -0.4344]], grad_fn=) rhs_data = tensor([[-0.0488, -0.0816], [-0.4168, -0.0973], [-0.2214, -0.0750], [-0.1562, -0.0219], ... -0.0837], [-0.2122, -0.0864], [-0.1754, -0.1021], [-0.0857, -0.0861]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[1-g7-idtype1] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.1321, -1.3864], [ 0.4641, -0.7318], [ 0.5015, -0.5702], [-0.1072, -0.3683], ...-0.7871], [ 0.1309, 0.7834], [ 0.0371, -1.2877], [ 0.7487, -1.1976]], grad_fn=) rhs_data = tensor([[ 0.1121, -0.1911], [ 0.0757, -0.2146], [ 0.1208, -0.2143], [ 0.1944, -0.1423], ... -0.2241], [ 0.1900, -0.1446], [ 0.2349, -0.1105], [ 0.0994, -0.2069]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[3-g0-idtype0] __________________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int32, out_dim = 3 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-1.3601, 1.3875], [ 0.2260, 1.2157]], grad_fn=) rhs_data = tensor([[ 0.0706, 0.2858], [-0.0163, 0.3519], [ 0.2913, 0.1362], [ 0.2742, 0.1458], ... -0.0076], [ 0.3882, 0.0806], [-0.0010, 0.3395], [ 0.1611, 0.2151]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[3-g0-idtype1] __________________________ g = Graph(num_nodes={'_U': 2, '_V': 4}, num_edges={('_U', '_E', '_V'): 8}, metagraph=[('_U', '_V', '_E')]) idtype = torch.int64, out_dim = 3 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[1.1212, 1.0092], [0.3330, 0.2156]], grad_fn=) rhs_data = tensor([[-0.1770, 0.1776], [-0.2192, 0.2054], [-0.1689, 0.2583], [-0.1929, 0.1956], ... 0.2251], [-0.1899, 0.1061], [-0.2263, 0.3646], [-0.1952, 0.1148]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[3-g1-idtype0] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hv': Scheme(shap...32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), 'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 3 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.9417, -0.9901], [ 0.0051, 0.0779], [ 1.8375, 0.3163], [ 0.6293, -0.8953], ...-1.6287], [-0.1944, 0.4275], [ 0.3914, -1.1927], [ 0.1526, 0.2306]], grad_fn=) rhs_data = tensor([[-0.1140, 0.4099], [-0.1283, 0.2489], [-0.1456, 0.2014], [-0.1062, 0.3335], ... 0.2252], [-0.0057, 0.5714], [-0.0824, 0.2928], [-0.1410, 0.3006]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[3-g1-idtype1] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hv': Scheme(shap...32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), 'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 3 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.6605, -0.9031], [ 0.7032, -0.6744], [ 0.9075, -1.0427], [ 1.3722, -0.7992], ...-0.2457], [ 0.7234, 0.6957], [-0.5191, 0.4515], [ 1.1244, 0.1045]], grad_fn=) rhs_data = tensor([[ 0.4939, 0.1642], [ 0.7238, -0.0041], [ 1.1079, -0.2024], [ 0.9937, -0.1238], ... 0.1780], [ 0.3609, 0.2623], [ 0.2757, 0.3295], [ 0.5029, 0.1205]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[3-g2-idtype0] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 3 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.6089, -0.5040], [-0.6448, -0.2721], [-0.6322, -0.8929], [-0.6227, -0.5643], ...-0.4440], [-0.6397, -0.1292], [-0.7402, -1.1689], [-0.6188, -0.1321]], grad_fn=) rhs_data = tensor([[ 0.3053, -0.2366], [ 0.3231, -0.2165], [ 0.3293, -0.2146], [ 0.3204, -0.2247], ... -0.1773], [ 0.3335, -0.1930], [ 0.3335, -0.2011], [ 0.2740, -0.2644]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
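Note that the failures are uniform across the whole matrix: out_dim 1 and 3, every graph case g0 through g7 (including the bipartite graph and the Blocks), and both idtypes fail with the identical TypeError at sparse.py:720. That uniformity points at the backend's autocast shim rather than anything in CFConv or the graph handling, consistent with the narrow scope suggested by the commit message "fix for pytorch < 1.12".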
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[3-g2-idtype1] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 3 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.4588, 0.0313], [-0.5335, -0.0400], [-0.3967, 0.5772], [-0.1241, 0.5410], ...-0.3400], [ 0.4190, 0.3312], [ 1.2772, -1.4966], [ 0.7217, -0.4742]], grad_fn=) rhs_data = tensor([[ 0.0317, 0.5005], [ 0.0516, 0.5524], [ 0.0077, 0.4559], [-0.0283, 0.4155], ... 0.3351], [-0.0642, 0.3377], [-0.0470, 0.3476], [-0.0184, 0.4145]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[3-g3-idtype0] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hv': Scheme(shap... 
edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 3 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.1415, 0.3143], [-0.5523, -0.1056], [-0.6425, 1.7113], [ 0.0089, -0.0386], ...-0.1144], [-0.4237, 0.2457], [-0.6992, 0.7787], [-0.6499, 0.9044]], grad_fn=) rhs_data = tensor([[-0.0592, -0.2300], [-0.0439, -0.2201], [-0.0511, -0.2210], [-0.1340, -0.3341], ... -0.2747], [-0.0295, -0.1982], [-0.0683, -0.2408], [-0.0857, -0.2714]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[3-g3-idtype1] __________________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'hv': Scheme(shap... 
edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 3 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[-0.4380, -0.4481], [ 0.3606, -1.0650], [ 0.1241, -0.3717], [ 1.2115, -3.1293], ...-1.1628], [ 0.1798, -1.0541], [-0.2272, -1.0274], [-0.0821, 0.1897]], grad_fn=) rhs_data = tensor([[-0.2521, 0.0691], [-0.2956, -0.0417], [-0.3016, -0.0219], [-0.2817, 0.0363], ... 0.0504], [-0.2361, 0.0184], [-0.1943, 0.1093], [-0.2175, 0.0664]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________________ test_cf_conv[3-g4-idtype0] __________________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 3 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 3]) def test_cf_conv(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3, hidden_feats=2, out_feats=out_dim) ctx = F.ctx() if F.gpu_ctx(): cfconv = cfconv.to(ctx) src_feats = F.randn((g.number_of_src_nodes(), 2)) edge_feats = F.randn((g.number_of_edges(), 3)) > h = cfconv(g, src_feats, edge_feats) tests\pytorch\test_nn.py:1131: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\cfconv.py:141: in forward g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:323: in invoke_gspmm z = op(graph, x, y) python\dgl\ops\spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[ 0.2628, 1.0869], [ 0.6771, 0.6082], [ 0.5164, 0.7393], [-0.6578, 1.3990], ... 1.0551], [ 1.1883, 0.5425], [ 1.1203, 0.0504], [ 0.7922, 0.9932]], grad_fn=) rhs_data = tensor([[-0.3165, 0.2238], [-0.2727, 0.2404], [-0.2912, 0.2499], [-0.2670, 0.2527], ... 0.2024], [-0.3545, 0.1823], [-0.3255, 0.2262], [-0.2844, 0.2451]], grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_cf_conv[3-g4-idtype1] __________________________

g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, out_dim = 3

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 3])
    def test_cf_conv(g, idtype, out_dim):
        g = g.astype(idtype).to(F.ctx())
        cfconv = nn.CFConv(node_in_feats=2, edge_in_feats=3,
                           hidden_feats=2, out_feats=out_dim)
        ctx = F.ctx()
        if F.gpu_ctx():
            cfconv = cfconv.to(ctx)
        src_feats = F.randn((g.number_of_src_nodes(), 2))
        edge_feats = F.randn((g.number_of_edges(), 3))
>       h = cfconv(g, src_feats, edge_feats)

tests\pytorch\test_nn.py:1131:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\cfconv.py:141: in forward
    g.update_all(fn.u_mul_e('hv', 'he', 'm'), fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:323: in invoke_gspmm
    z = op(graph, x, y)
python\dgl\ops\spmm.py:147: in func
    return gspmm(g, binary_op, reduce_op, x, y)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[ 0.4941, 0.4901], [ 0.4641, 0.4366], [ 0.5565, 0.7978], [ 0.4979, 0.4381], ... 0.8441], [ 0.5789, 0.7933], [ 0.5945, 0.9087], [ 0.7685, 1.5726]], grad_fn=)
rhs_data = tensor([[ 0.0670, 0.0383], [ 0.1258, 0.1065], [-0.0134, -0.0965], [ 0.1072, 0.0710], ... 0.1001], [ 0.1355, 0.1150], [ 0.1127, 0.0859], [ 0.0716, 0.0386]], grad_fn=)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_cf_conv[3-g5-idtype0] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, out_dim = 3

    [test body and traceback identical to test_cf_conv[3-g4-idtype1] above]

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[-0.0736, 0.4641], [ 1.9047, -1.5977], [ 0.3181, 0.2111], [ 0.1287, 0.3254], ...-0.8896], [ 0.0654, 0.4333], [ 0.2686, 0.3084], [ 0.2286, 0.2036]], grad_fn=)
rhs_data = tensor([[ 0.0152, -0.2629], [-0.0362, -0.2077], [-0.0189, -0.2596], [-0.1453, -0.1980], ... -0.2746], [ 0.1936, -0.3303], [ 0.0974, -0.2755], [ 0.1182, -0.2668]], grad_fn=)

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_cf_conv[3-g5-idtype1] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, out_dim = 3

    [test body and traceback identical to test_cf_conv[3-g4-idtype1] above]

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[ 0.9200, -0.2535], [-0.7085, -0.2813], [ 0.3873, -0.0866], [-0.5302, -0.8738], ...-1.1078], [-0.0254, -0.8007], [-0.0522, -0.1383], [ 0.3218, -0.3205]], grad_fn=)
rhs_data = tensor([[ 0.1152, -0.2811], [ 0.1181, -0.1706], [ 0.1433, -0.2596], [ 0.1307, -0.1279], ... -0.1837], [ 0.1467, -0.1241], [ 0.0818, -0.2227], [ 0.0809, -0.1871]], grad_fn=)

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_cf_conv[3-g6-idtype0] __________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int32
out_dim = 3

    [test body and traceback identical to test_cf_conv[3-g4-idtype1] above]

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[-0.1436, 0.4064], [ 0.6517, 1.6997], [-0.2646, 0.2199], [-1.1011, -1.2607], [-1.2178, -1.4861], [-0.4590, -0.1612]], grad_fn=)
rhs_data = tensor([[0.0357, 0.2791], [0.1345, 0.2601], [0.0985, 0.2724]], grad_fn=)

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_cf_conv[3-g6-idtype1] __________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3), idtype = torch.int64
out_dim = 3

    [test body and traceback identical to test_cf_conv[3-g4-idtype1] above]

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[ 0.2804, 0.1498], [-0.5499, -0.6719], [ 0.2159, -0.0975], [ 0.0350, -0.2297], [ 0.7875, 0.5588], [ 0.6448, 0.5562]], grad_fn=)
rhs_data = tensor([[ 0.0125, 0.2376], [ 0.2731, -0.0329], [ 0.0458, 0.2169]], grad_fn=)

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_cf_conv[3-g7-idtype0] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, out_dim = 3

    [test body and traceback identical to test_cf_conv[3-g4-idtype1] above]

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[ 0.3197, -1.0854], [ 0.2007, -0.8984], [-0.0698, -0.5147], [ 0.1016, -0.4987], ...-0.5964], [-0.0471, -0.5245], [ 0.1367, -0.7330], [-0.3348, -0.7299]], grad_fn=)
rhs_data = tensor([[ 0.3022, -0.2503], [ 0.3084, -0.2160], [ 0.3207, -0.1390], [ 0.3032, -0.2470], ... -0.2428], [ 0.2802, -0.3631], [ 0.2809, -0.3598], [ 0.3133, -0.1875]], grad_fn=)

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_________________________ test_cf_conv[3-g7-idtype1] __________________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'hv': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, out_dim = 3

    [test body and traceback identical to test_cf_conv[3-g4-idtype1] above]

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[0.4647, 1.6154], [0.5008, 0.4884], [0.4233, 0.0997], [0.4943, 0.2448], [0.501....5051, 0.7847], [0.5804, 1.2393], [0.4814, 0.6121], [0.4941, 0.0775]], grad_fn=)
rhs_data = tensor([[ 0.5990, -0.0943], [ 0.2034, -0.1971], [ 0.0647, -0.2374], [ 0.6960, -0.0682], ... -0.1513], [ 0.4471, -0.1353], [ 0.3549, -0.1566], [ 0.2290, -0.1916]], grad_fn=)

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
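Every failure in this block stops at the same frame: python\dgl\backend\pytorch\sparse.py:720, where gspmm enters "with autocast(enabled=False):" after autocast has been bound to a zero-argument fallback named empty_context. The snippet below is a minimal sketch that reproduces the identical TypeError; the body of empty_context is a stand-in inferred from the error message, not the actual DGL source.

    import contextlib

    @contextlib.contextmanager
    def empty_context():              # zero-argument no-op fallback, as the
        yield                         # error message implies

    autocast = empty_context          # a code path for older torch binds the
                                      # fallback in place of a real autocast

    try:
        with autocast(enabled=False):     # call site written against the real
            pass                          # autocast API
    except TypeError as err:
        print(err)  # empty_context() got an unexpected keyword argument 'enabled'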
_____________________ test_hetero_conv[False-sum-idtype0] _____________________

agg = 'sum', idtype = torch.int32, canonical_keys = False

    @parametrize_idtype
    @pytest.mark.parametrize('agg', ['sum', 'max', 'min', 'mean', 'stack', myagg])
    @pytest.mark.parametrize('canonical_keys', [False, True])
    def test_hetero_conv(agg, idtype, canonical_keys):
        g = dgl.heterograph({
            ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
            ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
            ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])},
            idtype=idtype, device=F.ctx())
        if not canonical_keys:
            conv = nn.HeteroGraphConv({
                'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True),
                'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True),
                'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)},
                agg)
        else:
            conv = nn.HeteroGraphConv({
                ('user', 'follows', 'user'): nn.GraphConv(2, 3, allow_zero_in_degree=True),
                ('user', 'plays', 'game'): nn.GraphConv(2, 4, allow_zero_in_degree=True),
                ('store', 'sells', 'game'): nn.GraphConv(3, 4, allow_zero_in_degree=True)},
                agg)
        conv = conv.to(F.ctx())

        # test pickle
        th.save(conv, tmp_buffer)

        uf = F.randn((4, 2))
        gf = F.randn((4, 4))
        sf = F.randn((2, 3))
>       h = conv(g, {'user': uf, 'game': gf, 'store': sf})

tests\pytorch\test_nn.py:1178:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\hetero.py:205: in forward
    **mod_kwargs.get(etype, {}))
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:428: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.1480, -0.2703, -1.3044], [ 1.4912, -0.3858, -0.5647]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_hetero_conv[False-sum-idtype1] _____________________

agg = 'sum', idtype = torch.int64, canonical_keys = False

    [test body and traceback identical to test_hetero_conv[False-sum-idtype0] above]

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.1559, 1.1797, -0.2209], [ 0.1136, -0.5057, -0.6093]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_hetero_conv[False-max-idtype0] _____________________

agg = 'max', idtype = torch.int32, canonical_keys = False

    [test body and traceback identical to test_hetero_conv[False-sum-idtype0] above]

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.6523, -1.2169, -0.0322], [ 0.3153, 1.4738, -0.4358]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_hetero_conv[False-max-idtype1] _____________________

agg = 'max', idtype = torch.int64, canonical_keys = False

    [test body and traceback identical to test_hetero_conv[False-sum-idtype0] above]

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.1729, -0.1719, 0.7675], [ 0.4450, 0.1347, -0.5818]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_hetero_conv[False-min-idtype0] _____________________

agg = 'min', idtype = torch.int32, canonical_keys = False

    [test body and traceback identical to test_hetero_conv[False-sum-idtype0] above]

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.0833, 0.9648, -0.7251], [-0.2482, -1.1087, -0.2997]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_hetero_conv[False-min-idtype1] _____________________

agg = 'min', idtype = torch.int64, canonical_keys = False

    [test body and traceback identical to test_hetero_conv[False-sum-idtype0] above]

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 1.0219, -0.1872, -1.8041], [-0.2160, -0.6605, -0.5506]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_hetero_conv[False-mean-idtype0] _____________________

agg = 'mean', idtype = torch.int32, canonical_keys = False

    [test body and traceback identical to test_hetero_conv[False-sum-idtype0] above]

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.2757, -0.7083, -0.6424], [-0.8338, -0.6223, 0.0424]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_hetero_conv[False-mean-idtype1] _____________________

agg = 'mean', idtype = torch.int64, canonical_keys = False

    [test body and traceback identical to test_hetero_conv[False-sum-idtype0] above]

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.8001, 0.6591, -1.7937], [ 0.4277, 1.2046, 0.8283]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_hetero_conv[False-stack-idtype0] ____________________

agg = 'stack', idtype = torch.int32, canonical_keys = False

    [test body and traceback identical to test_hetero_conv[False-sum-idtype0] above]

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.2573, -0.3374, 0.5159], [-0.5254, 0.7733, -0.4114]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_hetero_conv[False-stack-idtype1] ____________________

agg = 'stack', idtype = torch.int64, canonical_keys = False

    [test body and traceback identical to test_hetero_conv[False-sum-idtype0] above]

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.8973, -1.4752, -0.3907], [-0.4775, 0.2110, -0.7121]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_hetero_conv[False-myagg-idtype0] ____________________

agg = , idtype = torch.int32
canonical_keys = False

    [test body and traceback identical to test_hetero_conv[False-sum-idtype0] above]

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.6001, 0.6006, -0.1172], [ 0.5491, -0.4640, 0.7142]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_hetero_conv[False-myagg-idtype1] ____________________

agg = , idtype = torch.int64
canonical_keys = False

    [test body and traceback identical to test_hetero_conv[False-sum-idtype0] above]

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.1008, -0.2457, -0.4536], [-0.7911, -0.0238, 0.3008]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
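Since every test in this file dies inside the same shim rather than in the layer under test, a single repair at that point would unblock the whole run. One plausible sketch (not necessarily the change this PR makes) is a fallback that accepts and ignores the keyword arguments the real autocast context manager takes:

    import contextlib

    @contextlib.contextmanager
    def empty_context(*args, **kwargs):   # swallow enabled=..., dtype=..., etc.
        yield

    with empty_context(enabled=False):    # now a no-op instead of a TypeError
        pass

An alternative with the same effect is to pass enabled=False only on code paths where a real autocast implementation is known to be bound.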
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_hetero_conv[True-sum-idtype0] ______________________ agg = 'sum', idtype = torch.int32, canonical_keys = True @parametrize_idtype @pytest.mark.parametrize('agg', ['sum', 'max', 'min', 'mean', 'stack', myagg]) @pytest.mark.parametrize('canonical_keys', [False, True]) def test_hetero_conv(agg, idtype, canonical_keys): g = dgl.heterograph({ ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]), ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]), ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])}, idtype=idtype, device=F.ctx()) if not canonical_keys: conv = nn.HeteroGraphConv({ 'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True), 'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True), 'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)}, agg) else: conv = nn.HeteroGraphConv({ ('user', 'follows', 'user'): nn.GraphConv(2, 3, allow_zero_in_degree=True), ('user', 'plays', 'game'): nn.GraphConv(2, 4, allow_zero_in_degree=True), ('store', 'sells', 'game'): nn.GraphConv(3, 4, allow_zero_in_degree=True)}, agg) conv = conv.to(F.ctx()) # test pickle th.save(conv, tmp_buffer) uf = F.randn((4, 2)) gf = F.randn((4, 4)) sf = F.randn((2, 3)) > h = conv(g, {'user': uf, 'game': gf, 'store': sf}) tests\pytorch\test_nn.py:1178: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\hetero.py:205: in forward **mod_kwargs.get(etype, {})) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:428: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1678, -0.5335, 0.0572], [ 0.6498, -0.7910, -1.5448]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_hetero_conv[True-sum-idtype1] ______________________ agg = 'sum', idtype = torch.int64, canonical_keys = True @parametrize_idtype @pytest.mark.parametrize('agg', ['sum', 'max', 'min', 'mean', 'stack', myagg]) @pytest.mark.parametrize('canonical_keys', [False, True]) def test_hetero_conv(agg, idtype, canonical_keys): g = dgl.heterograph({ ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]), ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]), ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])}, idtype=idtype, device=F.ctx()) if not canonical_keys: conv = nn.HeteroGraphConv({ 'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True), 'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True), 'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)}, agg) else: conv = nn.HeteroGraphConv({ ('user', 'follows', 'user'): nn.GraphConv(2, 3, allow_zero_in_degree=True), ('user', 'plays', 'game'): nn.GraphConv(2, 4, allow_zero_in_degree=True), ('store', 'sells', 'game'): nn.GraphConv(3, 4, allow_zero_in_degree=True)}, agg) conv = conv.to(F.ctx()) # test pickle th.save(conv, tmp_buffer) uf = F.randn((4, 2)) gf = F.randn((4, 4)) sf = F.randn((2, 3)) > h = conv(g, {'user': uf, 'game': gf, 'store': sf}) tests\pytorch\test_nn.py:1178: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\hetero.py:205: in forward **mod_kwargs.get(etype, {})) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:428: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.5986, 0.0135, -0.9486], [-0.7777, 1.2475, 0.5733]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_hetero_conv[True-max-idtype0] ______________________ agg = 'max', idtype = torch.int32, canonical_keys = True @parametrize_idtype @pytest.mark.parametrize('agg', ['sum', 'max', 'min', 'mean', 'stack', myagg]) @pytest.mark.parametrize('canonical_keys', [False, True]) def test_hetero_conv(agg, idtype, canonical_keys): g = dgl.heterograph({ ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]), ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]), ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])}, idtype=idtype, device=F.ctx()) if not canonical_keys: conv = nn.HeteroGraphConv({ 'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True), 'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True), 'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)}, agg) else: conv = nn.HeteroGraphConv({ ('user', 'follows', 'user'): nn.GraphConv(2, 3, allow_zero_in_degree=True), ('user', 'plays', 'game'): nn.GraphConv(2, 4, allow_zero_in_degree=True), ('store', 'sells', 'game'): nn.GraphConv(3, 4, allow_zero_in_degree=True)}, agg) conv = conv.to(F.ctx()) # test pickle th.save(conv, tmp_buffer) uf = F.randn((4, 2)) gf = F.randn((4, 4)) sf = F.randn((2, 3)) > h = conv(g, {'user': uf, 'game': gf, 'store': sf}) tests\pytorch\test_nn.py:1178: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\hetero.py:205: in forward **mod_kwargs.get(etype, {})) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:428: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.7883, -1.6155, -0.2600], [-0.9716, -0.0626, 0.3580]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _____________________ test_hetero_conv[True-max-idtype1] ______________________ agg = 'max', idtype = torch.int64, canonical_keys = True @parametrize_idtype @pytest.mark.parametrize('agg', ['sum', 'max', 'min', 'mean', 'stack', myagg]) @pytest.mark.parametrize('canonical_keys', [False, True]) def test_hetero_conv(agg, idtype, canonical_keys): g = dgl.heterograph({ ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]), ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]), ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])}, idtype=idtype, device=F.ctx()) if not canonical_keys: conv = nn.HeteroGraphConv({ 'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True), 'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True), 'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)}, agg) else: conv = nn.HeteroGraphConv({ ('user', 'follows', 'user'): nn.GraphConv(2, 3, allow_zero_in_degree=True), ('user', 'plays', 'game'): nn.GraphConv(2, 4, allow_zero_in_degree=True), ('store', 'sells', 'game'): nn.GraphConv(3, 4, allow_zero_in_degree=True)}, agg) conv = conv.to(F.ctx()) # test pickle th.save(conv, tmp_buffer) uf = F.randn((4, 2)) gf = F.randn((4, 4)) sf = F.randn((2, 3)) > h = conv(g, {'user': uf, 'game': gf, 'store': sf}) tests\pytorch\test_nn.py:1178: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\hetero.py:205: in forward **mod_kwargs.get(etype, {})) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\graphconv.py:428: in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.5541, -0.2472, 0.7340], [-0.1801, 0.2593, -1.6536]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
_____________________ test_hetero_conv[True-min-idtype0] ______________________
agg = 'min', idtype = torch.int32, canonical_keys = True
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_hetero_conv[True-min-idtype1] ______________________
agg = 'min', idtype = torch.int64, canonical_keys = True
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_hetero_conv[True-mean-idtype0] _____________________
agg = 'mean', idtype = torch.int32, canonical_keys = True
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
_____________________ test_hetero_conv[True-mean-idtype1] _____________________
agg = 'mean', idtype = torch.int64, canonical_keys = True
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_hetero_conv[True-stack-idtype0] _____________________
agg = 'stack', idtype = torch.int32, canonical_keys = True
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_hetero_conv[True-stack-idtype1] _____________________
agg = 'stack', idtype = torch.int64, canonical_keys = True
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_hetero_conv[True-myagg-idtype0] _____________________
agg = , idtype = torch.int32
canonical_keys = True
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________ test_hetero_conv[True-myagg-idtype1] _____________________
agg = , idtype = torch.int64
canonical_keys = True
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python\dgl\backend\pytorch\sparse.py:720: TypeError
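Since torch.cuda.amp.autocast does accept an enabled keyword on the PyTorch versions that provide it, one version-tolerant binding could look like the sketch below. This only illustrates the shape a fix could take, with the fallback assumed to simply tolerate the amp keywords; it is not necessarily the change made in this PR:

    from contextlib import contextmanager

    try:
        from torch.cuda.amp import autocast  # real autocast where amp exists
    except ImportError:
        @contextmanager
        def autocast(enabled=True, **kwargs):
            # no-op fallback that accepts and ignores the amp keywords
            yield

    # call sites such as gspmm can then disable autocast unconditionally:
    # with autocast(enabled=False): ...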
_______________________ test_gnnexplainer[1-g0-idtype0] _______________________

g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)})
idtype = torch.int32, out_dim = 1

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
    @pytest.mark.parametrize('out_dim', [1, 2])
    def test_gnnexplainer(g, idtype, out_dim):
        g = g.astype(idtype).to(F.ctx())
        feat = F.randn((g.num_nodes(), 5))

        class Model(th.nn.Module):
            def __init__(self, in_feats, out_feats, graph=False):
                super(Model, self).__init__()
                self.linear = th.nn.Linear(in_feats, out_feats)
                if graph:
                    self.pool = nn.AvgPooling()
                else:
                    self.pool = None

            def forward(self, graph, feat, eweight=None):
                with graph.local_scope():
                    feat = self.linear(feat)
                    graph.ndata['h'] = feat
                    if eweight is None:
                        graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
                    else:
                        graph.edata['w'] = eweight
                        graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h'))
                    if self.pool:
                        return self.pool(graph, graph.ndata['h'])
                    else:
                        return graph.ndata['h']

        # Explain node prediction
        model = Model(5, out_dim)
        model = model.to(F.ctx())
        explainer = nn.GNNExplainer(model, num_hops=1)
>       new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat)

tests\pytorch\test_nn.py:1321:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node
    logits = self.model(graph=sg, feat=feat, **kwargs)
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
tests\pytorch\test_nn.py:1307: in forward
    graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.2950],
        [-0.1006],
        [ 0.4147]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
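The test_gnnexplainer cases reach the same kernel because explain_node re-runs the model's forward, and the fn.copy_u / fn.sum pair in update_all is lowered by dgl.core.message_passing to the copy_lhs/sum gspmm invocation shown in the frames above. A toy sketch of that lowering path; the graph and feature values here are made up:

    import dgl
    import dgl.function as fn
    import torch as th

    g = dgl.graph(([0, 1, 2], [1, 2, 0]))  # hypothetical 3-node graph
    g.ndata['h'] = th.randn(3, 5)

    # Lowered to gspmm(g, 'copy_lhs', 'sum', h, None), the call that trips
    # the autocast wrapper on the affected builds.
    g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))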
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[1-g0-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.6360], [0.3688], [0.3020]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[1-g1-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.3144], [-0.7383], [ 0.2948]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[1-g1-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.9873], [-0.5664], [-0.3057]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[1-g2-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.2969], [ 0.4126], [-0.0673]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[1-g2-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[1.2024], [1.0371], [0.8049]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[1-g3-idtype0] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum', lhs_data = tensor([[0.3815]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[1-g3-idtype1] _______________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum', lhs_data = tensor([[-0.4402]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[1-g4-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 0.1401], [-0.6405], [ 0.1352]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[1-g4-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[ 1.0955], [ 0.5437], [-0.7968]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[1-g5-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int32, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0682], [0.1455], [0.4357]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[1-g5-idtype1] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={} edata_schemes={}) idtype = torch.int64, out_dim = 1 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[1.0359], [0.2529], [0.1073]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _______________________ test_gnnexplainer[2-g0-idtype0] _______________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32)}) idtype = torch.int32, out_dim = 2 @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree'])) @pytest.mark.parametrize('out_dim', [1, 2]) def test_gnnexplainer(g, idtype, out_dim): g = g.astype(idtype).to(F.ctx()) feat = F.randn((g.num_nodes(), 5)) class Model(th.nn.Module): def __init__(self, in_feats, out_feats, graph=False): super(Model, self).__init__() self.linear = th.nn.Linear(in_feats, out_feats) if graph: self.pool = nn.AvgPooling() else: self.pool = None def forward(self, graph, feat, eweight=None): with graph.local_scope(): feat = self.linear(feat) graph.ndata['h'] = feat if eweight is None: graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) else: graph.edata['w'] = eweight graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h')) if self.pool: return self.pool(graph, graph.ndata['h']) else: return graph.ndata['h'] # Explain node prediction model = Model(5, out_dim) model = model.to(F.ctx()) explainer = nn.GNNExplainer(model, num_hops=1) > new_center, sg, feat_mask, edge_mask = explainer.explain_node(0, g, feat) tests\pytorch\test_nn.py:1321: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python\dgl\nn\pytorch\explain\gnnexplainer.py:275: in explain_node logits = self.model(graph=sg, feat=feat, **kwargs) c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) tests\pytorch\test_nn.py:1307: in forward graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[-0.1020, -0.6491], [-0.4660, 0.1965], [ 0.2674, 1.0945]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
[The remaining eleven test_gnnexplainer parametrizations --
test_gnnexplainer[2-g0-idtype1], [2-g1-idtype0], [2-g1-idtype1],
[2-g2-idtype0], [2-g2-idtype1], [2-g3-idtype0], [2-g3-idtype1],
[2-g4-idtype0], [2-g4-idtype1], [2-g5-idtype0] and [2-g5-idtype1] -- fail with
the same traceback as test_gnnexplainer[2-g0-idtype0] above and the same
TypeError at python\dgl\backend\pytorch\sparse.py:720; their repeated test
source and tensor dumps are omitted here.]
_________________________________ test_twirls _________________________________

    def test_twirls():
        g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
        feat = th.ones(6, 10)
        conv = nn.TWIRLSConv(10, 2, 128, prop_step = 64)
>       res = conv(g , feat)

tests\pytorch\test_nn.py:1392:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\twirlsconv.py:194: in forward
    x = self.unfolding(graph, x)
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\twirlsconv.py:561: in forward
    Y = layer(g, Y, X, self.alp, self.lam)
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\twirlsconv.py:249: in forward
    + alp * D_power_bias_X(graph, X, -1, lam, 1 - lam)
python\dgl\nn\pytorch\conv\twirlsconv.py:218: in _prop
    Y = AX(graph, Y)
python\dgl\nn\pytorch\conv\twirlsconv.py:413: in AX
    fn.u_mul_e("h", "w", "m"), fn.sum("m", "h"),
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:323: in invoke_gspmm
    z = op(graph, x, y)
python\dgl\ops\spmm.py:147: in func
    return gspmm(g, binary_op, reduce_op, x, y)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[ 0.3929,  0.0664,  0.2063,  0.1665, -0.0047,  0.0481,  0.2710,
                     0.1409, -0.2799,  0.0362,  0.3737, ...2714,  0.1337,  0.2547,
                     0.5175, -0.8311,  0.1049, -0.2532, -0.4237,  0.7530]],
                  grad_fn=)
rhs_data = tensor([[1.], [1.], [1.], [1.], [1.], [1.],
                   [1.], [1.], [1.], [1.], [1.], [1.]])

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
____________________________ test_hgt[1-4-idtype0] ____________________________

idtype = torch.int32, in_size = 4, num_heads = 1

    @parametrize_idtype
    @pytest.mark.parametrize('in_size', [4])
    @pytest.mark.parametrize('num_heads', [1])
    def test_hgt(idtype, in_size, num_heads):
        dev = F.ctx()
        num_etypes = 5
        num_ntypes = 2
        head_size = in_size // num_heads
        g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.01))
        g = g.astype(idtype).to(dev)
        etype = th.tensor([i % num_etypes for i in range(g.num_edges())]).to(dev)
        ntype = th.tensor([i % num_ntypes for i in range(g.num_nodes())]).to(dev)
        x = th.randn(g.num_nodes(), in_size).to(dev)
        m = nn.HGTConv(in_size, head_size, num_heads, num_ntypes, num_etypes).to(dev)
>       y = m(g, x, ntype, etype)

tests\pytorch\test_nn.py:1434:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\hgtconv.py:136: in forward
    g.edata['m'] = g.edata['m'] * edge_softmax(g, g.edata['a']).unsqueeze(-1)
python\dgl\ops\edge_softmax.py:135: in edge_softmax
    eids=eids, norm_by=norm_by)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
logits = tensor([[-0.1500],
                 [ 0.1758],
                 [-0.0766],
                 [-0.2495],
                 [-0.0076],
                 [-0.1007],
                 ...
                 [-0.2692],
                 [-0.1459],
                 [ 0.1354],
                 [ 0.0796],
                 [ 0.1519]], grad_fn=)
eids = '__ALL__', norm_by = 'dst'

    def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'):
        args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:770: TypeError

[test_hgt[1-4-idtype1] (idtype = torch.int64) fails identically: the same
traceback through edge_softmax and the same TypeError at
python\dgl\backend\pytorch\sparse.py:770; its repeated test source and logits
dump are omitted here.]
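[Editor's note: edge_softmax reaches the same shim, only through the call site
at python\dgl\backend\pytorch\sparse.py:770. A keyword-tolerant null context
is one way to make both call sites version-safe -- a sketch under the same
assumptions as above, not DGL's actual fix:

    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):
        # Accept and discard autocast-style arguments such as enabled=False,
        # so every call site can use one code path on every PyTorch version.
        yield

    with empty_context(enabled=False):  # no TypeError with this fallback
        pass
]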
_________________________ test_group_rev_res[idtype0] _________________________

idtype = torch.int32

    @parametrize_idtype
    def test_group_rev_res(idtype):
        dev = F.ctx()
        num_nodes = 5
        num_edges = 20
        feats = 32
        groups = 2
        g = dgl.rand_graph(num_nodes, num_edges).to(dev)
        h = th.randn(num_nodes, feats).to(dev)
        conv = nn.GraphConv(feats // groups, feats // groups)
        model = nn.GroupRevRes(conv, groups).to(dev)
>       result = model(g, h)

tests\pytorch\test_nn.py:1545:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\grouprevres.py:235: in forward
    *(args + tuple([p for p in self.parameters() if p.requires_grad])))
python\dgl\nn\pytorch\conv\grouprevres.py:29: in forward
    outputs = ctx.fn(*x).detach_()
python\dgl\nn\pytorch\conv\grouprevres.py:182: in _forward
    y_in = xs[i] + self.gnn_modules[i](g, y_in, *args_chunks[i])
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\graphconv.py:428: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.5902,  0.0630,  0.3305,  0.9670, -0.3103, -0.0413,  0.3273,
                     2.3613, -0.2182,  0.3764,  0.0723, ... 0.2139, -0.6448,  0.0698,
                    -0.0153,  1.0399, -0.7878, -0.4574,  0.1666,  0.0027, -0.1664,
                     0.0547,  0.2055]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
[test_group_rev_res[idtype1] (idtype = torch.int64) fails identically: the
same traceback through nn.GroupRevRes and the same TypeError at
python\dgl\backend\pytorch\sparse.py:720; its repeated test source and tensor
dump are omitted here.]
_________________________ test_egnn_conv[16-16-16-16] _________________________

in_size = 16, hidden_size = 16, out_size = 16, edge_feat_size = 16

    @pytest.mark.parametrize('in_size', [16, 32])
    @pytest.mark.parametrize('hidden_size', [16, 32])
    @pytest.mark.parametrize('out_size', [16, 32])
    @pytest.mark.parametrize('edge_feat_size', [16, 10, 0])
    def test_egnn_conv(in_size, hidden_size, out_size, edge_feat_size):
        dev = F.ctx()
        num_nodes = 5
        num_edges = 20
        g = dgl.rand_graph(num_nodes, num_edges).to(dev)
        h = th.randn(num_nodes, in_size).to(dev)
        x = th.randn(num_nodes, 3).to(dev)
        e = th.randn(num_edges, edge_feat_size).to(dev)
        model = nn.EGNNConv(in_size, hidden_size, out_size, edge_feat_size).to(dev)
>       model(g, h, x, e)

tests\pytorch\test_nn.py:1561:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\conv\egnnconv.py:137: in forward
    graph.apply_edges(fn.u_sub_v('x', 'x', 'x_diff'))
python\dgl\heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python\dgl\core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python\dgl\ops\sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python\dgl\ops\sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[ 0.1555,  1.3800, -1.3861],
                   [ 0.6016, -0.5539, -1.4905],
                   [ 0.5590, -0.8129,  0.7952],
                   [-0.1751,  0.4680, -0.3326],
                   [ 1.4519, -0.1267,  0.9008]])
rhs_data = tensor([[-0.1555, -1.3800,  1.3861],
                   [-0.6016,  0.5539,  1.4905],
                   [-0.5590,  0.8129, -0.7952],
                   [ 0.1751, -0.4680,  0.3326],
                   [-1.4519,  0.1267, -0.9008]])
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:731: TypeError
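[Editor's note: the gsddmm prologue above canonicalizes operators before
dispatch: 'sub' becomes 'add' with a negated right operand, and 'div' becomes
'mul' with a reciprocal right operand. That is why op shows as 'add' and
rhs_data is exactly -lhs_data in the dump, even though the test called
fn.u_sub_v('x', 'x', 'x_diff') with both operands taken from the same feature.
A tiny self-contained check of the identity in plain PyTorch; the rhs values
are illustrative:

    import torch

    lhs = torch.tensor([[0.1555, 1.3800, -1.3861]])  # first row of lhs_data above
    rhs = torch.tensor([[0.5000, -2.0000, 4.0000]])  # illustrative values

    assert torch.allclose(lhs - rhs, lhs + (-rhs))       # 'sub' -> 'add'
    assert torch.allclose(lhs / rhs, lhs * (1.0 / rhs))  # 'div' -> 'mul'
]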
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_egnn_conv[16-16-16-32] _________________________ in_size = 32, hidden_size = 16, out_size = 16, edge_feat_size = 16 @pytest.mark.parametrize('in_size', [16, 32]) @pytest.mark.parametrize('hidden_size', [16, 32]) @pytest.mark.parametrize('out_size', [16, 32]) @pytest.mark.parametrize('edge_feat_size', [16, 10, 0]) def test_egnn_conv(in_size, hidden_size, out_size, edge_feat_size): dev = F.ctx() num_nodes = 5 num_edges = 20 g = dgl.rand_graph(num_nodes, num_edges).to(dev) h = th.randn(num_nodes, in_size).to(dev) x = th.randn(num_nodes, 3).to(dev) e = th.randn(num_edges, edge_feat_size).to(dev) model = nn.EGNNConv(in_size, hidden_size, out_size, edge_feat_size).to(dev) > model(g, h, x, e) tests\pytorch\test_nn.py:1561: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egnnconv.py:137: in forward graph.apply_edges(fn.u_sub_v('x', 'x', 'x_diff')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.5551, 0.1841, -0.4555], [ 1.0061, 0.7136, 0.6413], [ 0.1150, -0.9536, 0.2384], [-0.2700, -1.7394, -0.4870], [-1.6799, 1.3412, -1.0390]]) rhs_data = tensor([[-0.5551, -0.1841, 0.4555], [-1.0061, -0.7136, -0.6413], [-0.1150, 0.9536, -0.2384], [ 0.2700, 1.7394, 0.4870], [ 1.6799, -1.3412, 1.0390]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_egnn_conv[16-16-32-16] _________________________ in_size = 16, hidden_size = 32, out_size = 16, edge_feat_size = 16 @pytest.mark.parametrize('in_size', [16, 32]) @pytest.mark.parametrize('hidden_size', [16, 32]) @pytest.mark.parametrize('out_size', [16, 32]) @pytest.mark.parametrize('edge_feat_size', [16, 10, 0]) def test_egnn_conv(in_size, hidden_size, out_size, edge_feat_size): dev = F.ctx() num_nodes = 5 num_edges = 20 g = dgl.rand_graph(num_nodes, num_edges).to(dev) h = th.randn(num_nodes, in_size).to(dev) x = th.randn(num_nodes, 3).to(dev) e = th.randn(num_edges, edge_feat_size).to(dev) model = nn.EGNNConv(in_size, hidden_size, out_size, edge_feat_size).to(dev) > model(g, h, x, e) tests\pytorch\test_nn.py:1561: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egnnconv.py:137: in forward graph.apply_edges(fn.u_sub_v('x', 'x', 'x_diff')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-5.7356e-01, -1.1111e+00, -2.0557e-01], [ 1.4431e+00, -1.0450e+00, 1.1679e-03], [-5.0074e-01...4e+00, -3.7470e-01], [ 8.6229e-01, -2.4098e-01, 7.6695e-01], [-9.1489e-01, -7.4053e-01, -2.6023e-01]]) rhs_data = tensor([[ 5.7356e-01, 1.1111e+00, 2.0557e-01], [-1.4431e+00, 1.0450e+00, -1.1679e-03], [ 5.0074e-01...4e+00, 3.7470e-01], [-8.6229e-01, 2.4098e-01, -7.6695e-01], [ 9.1489e-01, 7.4053e-01, 2.6023e-01]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_egnn_conv[16-16-32-32] _________________________ in_size = 32, hidden_size = 32, out_size = 16, edge_feat_size = 16 @pytest.mark.parametrize('in_size', [16, 32]) @pytest.mark.parametrize('hidden_size', [16, 32]) @pytest.mark.parametrize('out_size', [16, 32]) @pytest.mark.parametrize('edge_feat_size', [16, 10, 0]) def test_egnn_conv(in_size, hidden_size, out_size, edge_feat_size): dev = F.ctx() num_nodes = 5 num_edges = 20 g = dgl.rand_graph(num_nodes, num_edges).to(dev) h = th.randn(num_nodes, in_size).to(dev) x = th.randn(num_nodes, 3).to(dev) e = th.randn(num_edges, edge_feat_size).to(dev) model = nn.EGNNConv(in_size, hidden_size, out_size, edge_feat_size).to(dev) > model(g, h, x, e) tests\pytorch\test_nn.py:1561: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egnnconv.py:137: in forward graph.apply_edges(fn.u_sub_v('x', 'x', 'x_diff')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.1025, 0.3959, 1.3502], [ 0.4283, -1.9424, -0.3929], [ 0.1560, -0.2212, 0.0080], [-0.6130, 0.3075, 0.4483], [ 0.1573, -0.1212, 1.2261]]) rhs_data = tensor([[-0.1025, -0.3959, -1.3502], [-0.4283, 1.9424, 0.3929], [-0.1560, 0.2212, -0.0080], [ 0.6130, -0.3075, -0.4483], [-0.1573, 0.1212, -1.2261]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_egnn_conv[16-32-16-16] _________________________ in_size = 16, hidden_size = 16, out_size = 32, edge_feat_size = 16 @pytest.mark.parametrize('in_size', [16, 32]) @pytest.mark.parametrize('hidden_size', [16, 32]) @pytest.mark.parametrize('out_size', [16, 32]) @pytest.mark.parametrize('edge_feat_size', [16, 10, 0]) def test_egnn_conv(in_size, hidden_size, out_size, edge_feat_size): dev = F.ctx() num_nodes = 5 num_edges = 20 g = dgl.rand_graph(num_nodes, num_edges).to(dev) h = th.randn(num_nodes, in_size).to(dev) x = th.randn(num_nodes, 3).to(dev) e = th.randn(num_edges, edge_feat_size).to(dev) model = nn.EGNNConv(in_size, hidden_size, out_size, edge_feat_size).to(dev) > model(g, h, x, e) tests\pytorch\test_nn.py:1561: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egnnconv.py:137: in forward graph.apply_edges(fn.u_sub_v('x', 'x', 'x_diff')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.2653, 1.2890, 0.6706], [ 0.6051, 1.2266, -0.2979], [-1.1657, 0.0117, -0.2497], [ 0.8012, 1.4983, 0.5824], [-0.2324, -1.0041, 0.7958]]) rhs_data = tensor([[ 0.2653, -1.2890, -0.6706], [-0.6051, -1.2266, 0.2979], [ 1.1657, -0.0117, 0.2497], [-0.8012, -1.4983, -0.5824], [ 0.2324, 1.0041, -0.7958]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_egnn_conv[16-32-16-32] _________________________ in_size = 32, hidden_size = 16, out_size = 32, edge_feat_size = 16 @pytest.mark.parametrize('in_size', [16, 32]) @pytest.mark.parametrize('hidden_size', [16, 32]) @pytest.mark.parametrize('out_size', [16, 32]) @pytest.mark.parametrize('edge_feat_size', [16, 10, 0]) def test_egnn_conv(in_size, hidden_size, out_size, edge_feat_size): dev = F.ctx() num_nodes = 5 num_edges = 20 g = dgl.rand_graph(num_nodes, num_edges).to(dev) h = th.randn(num_nodes, in_size).to(dev) x = th.randn(num_nodes, 3).to(dev) e = th.randn(num_edges, edge_feat_size).to(dev) model = nn.EGNNConv(in_size, hidden_size, out_size, edge_feat_size).to(dev) > model(g, h, x, e) tests\pytorch\test_nn.py:1561: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egnnconv.py:137: in forward graph.apply_edges(fn.u_sub_v('x', 'x', 'x_diff')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.3298, 0.2859, 0.9299], [ 1.4075, -1.2248, 0.3947], [ 0.5814, 0.3582, -1.2485], [-0.1334, 1.6807, -1.0467], [ 0.2541, 0.4382, 0.5471]]) rhs_data = tensor([[ 0.3298, -0.2859, -0.9299], [-1.4075, 1.2248, -0.3947], [-0.5814, -0.3582, 1.2485], [ 0.1334, -1.6807, 1.0467], [-0.2541, -0.4382, -0.5471]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_egnn_conv[16-32-32-16] _________________________ in_size = 16, hidden_size = 32, out_size = 32, edge_feat_size = 16 @pytest.mark.parametrize('in_size', [16, 32]) @pytest.mark.parametrize('hidden_size', [16, 32]) @pytest.mark.parametrize('out_size', [16, 32]) @pytest.mark.parametrize('edge_feat_size', [16, 10, 0]) def test_egnn_conv(in_size, hidden_size, out_size, edge_feat_size): dev = F.ctx() num_nodes = 5 num_edges = 20 g = dgl.rand_graph(num_nodes, num_edges).to(dev) h = th.randn(num_nodes, in_size).to(dev) x = th.randn(num_nodes, 3).to(dev) e = th.randn(num_edges, edge_feat_size).to(dev) model = nn.EGNNConv(in_size, hidden_size, out_size, edge_feat_size).to(dev) > model(g, h, x, e) tests\pytorch\test_nn.py:1561: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egnnconv.py:137: in forward graph.apply_edges(fn.u_sub_v('x', 'x', 'x_diff')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[-0.6349, -0.2510, -0.9359], [-0.1156, -0.9172, 1.0636], [-0.0240, 1.0405, -0.1474], [-1.1110, 1.1568, 0.4864], [-0.8727, -0.0653, -0.1393]]) rhs_data = tensor([[ 0.6349, 0.2510, 0.9359], [ 0.1156, 0.9172, -1.0636], [ 0.0240, -1.0405, 0.1474], [ 1.1110, -1.1568, -0.4864], [ 0.8727, 0.0653, 0.1393]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_egnn_conv[16-32-32-32] _________________________ in_size = 32, hidden_size = 32, out_size = 32, edge_feat_size = 16 @pytest.mark.parametrize('in_size', [16, 32]) @pytest.mark.parametrize('hidden_size', [16, 32]) @pytest.mark.parametrize('out_size', [16, 32]) @pytest.mark.parametrize('edge_feat_size', [16, 10, 0]) def test_egnn_conv(in_size, hidden_size, out_size, edge_feat_size): dev = F.ctx() num_nodes = 5 num_edges = 20 g = dgl.rand_graph(num_nodes, num_edges).to(dev) h = th.randn(num_nodes, in_size).to(dev) x = th.randn(num_nodes, 3).to(dev) e = th.randn(num_edges, edge_feat_size).to(dev) model = nn.EGNNConv(in_size, hidden_size, out_size, edge_feat_size).to(dev) > model(g, h, x, e) tests\pytorch\test_nn.py:1561: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egnnconv.py:137: in forward graph.apply_edges(fn.u_sub_v('x', 'x', 'x_diff')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.0763, 0.6033, 1.4741], [-0.7192, 0.4424, 1.2418], [-0.0951, -0.3115, -1.4940], [ 1.1853, 0.1006, -0.5396], [ 0.3217, -0.4816, 0.4737]]) rhs_data = tensor([[-1.0763, -0.6033, -1.4741], [ 0.7192, -0.4424, -1.2418], [ 0.0951, 0.3115, 1.4940], [-1.1853, -0.1006, 0.5396], [-0.3217, 0.4816, -0.4737]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:731: TypeError _________________________ test_egnn_conv[10-16-16-16] _________________________ in_size = 16, hidden_size = 16, out_size = 16, edge_feat_size = 10 @pytest.mark.parametrize('in_size', [16, 32]) @pytest.mark.parametrize('hidden_size', [16, 32]) @pytest.mark.parametrize('out_size', [16, 32]) @pytest.mark.parametrize('edge_feat_size', [16, 10, 0]) def test_egnn_conv(in_size, hidden_size, out_size, edge_feat_size): dev = F.ctx() num_nodes = 5 num_edges = 20 g = dgl.rand_graph(num_nodes, num_edges).to(dev) h = th.randn(num_nodes, in_size).to(dev) x = th.randn(num_nodes, 3).to(dev) e = th.randn(num_edges, edge_feat_size).to(dev) model = nn.EGNNConv(in_size, hidden_size, out_size, edge_feat_size).to(dev) > model(g, h, x, e) tests\pytorch\test_nn.py:1561: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\conv\egnnconv.py:137: in forward graph.apply_edges(fn.u_sub_v('x', 'x', 'x_diff')) python\dgl\heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python\dgl\core.py:266: in invoke_gsddmm z = op(graph, x, y) python\dgl\ops\sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python\dgl\ops\sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 0.4903, 0.9804, -0.8172], [-0.0522, -1.0841, 0.7529], [ 0.3059, -1.4338, 0.9355], [-1.2628, 2.2440, -0.2803], [-0.6934, 1.0953, -0.3488]]) rhs_data = tensor([[-0.4903, -0.9804, 0.8172], [ 0.0522, 1.0841, -0.7529], [-0.3059, 1.4338, -0.9355], [ 1.2628, -2.2440, 0.2803], [ 0.6934, -1.0953, 0.3488]]) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The remaining 16 test_egnn_conv parametrizations fail with the identical
traceback (TypeError: empty_context() got an unexpected keyword argument
'enabled', raised at python\dgl\backend\pytorch\sparse.py:731); only the
random tensor values differ:
test_egnn_conv[10-16-16-16]   test_egnn_conv[10-16-16-32]
test_egnn_conv[10-16-32-16]   test_egnn_conv[10-16-32-32]
test_egnn_conv[10-32-16-16]   test_egnn_conv[10-32-16-32]
test_egnn_conv[10-32-32-16]   test_egnn_conv[10-32-32-32]
test_egnn_conv[0-16-16-16]    test_egnn_conv[0-16-16-32]
test_egnn_conv[0-16-32-16]    test_egnn_conv[0-16-32-32]
test_egnn_conv[0-32-16-16]    test_egnn_conv[0-32-16-32]
test_egnn_conv[0-32-32-16]    test_egnn_conv[0-32-32-32]
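All of the failures above bottom out at the same statement: the backend builds args via _cast_if_autocast_enabled and then enters `with autocast(enabled=False):` at python\dgl\backend\pytorch\sparse.py:731, but on this build `autocast` is bound to a no-argument empty_context fallback that rejects the `enabled` keyword. A version-tolerant fallback must accept (and ignore) the same keyword. As an illustrative sketch only, not the actual patch in this PR:

    import contextlib

    try:
        # torch.cuda.amp.autocast takes enabled=... on PyTorch builds
        # that ship AMP.
        from torch.cuda.amp import autocast
    except ImportError:
        @contextlib.contextmanager
        def autocast(enabled=True, **kwargs):
            # No AMP available: accept the same signature and do nothing,
            # so `with autocast(enabled=False):` is valid everywhere.
            yield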
__________________ test_label_prop[True-True-True-sym-0.0-3] __________________

k = 3, alpha = 0.0, norm_type = 'sym', clamp = True, normalize = True
reset = True

    @pytest.mark.parametrize('k', [3, 5])
    @pytest.mark.parametrize('alpha', [0., 0.5, 1.])
    @pytest.mark.parametrize('norm_type', ['sym', 'row'])
    @pytest.mark.parametrize('clamp', [True, False])
    @pytest.mark.parametrize('normalize', [True, False])
    @pytest.mark.parametrize('reset', [True, False])
    def test_label_prop(k, alpha, norm_type, clamp, normalize, reset):
        dev = F.ctx()
        num_nodes = 5
        num_edges = 20
        num_classes = 4
        g = dgl.rand_graph(num_nodes, num_edges).to(dev)
        labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev)
        ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7
        mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev)
        model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset)
>       model(g, labels, mask)

tests\pytorch\test_nn.py:1601:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python\dgl\nn\pytorch\utils.py:511: in forward
    g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
python\dgl\heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python\dgl\core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python\dgl\core.py:332: in invoke_gspmm
    z = op(graph, x)
python\dgl\ops\spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python\dgl\ops\spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000],
        [0.0000, 0.0000, 0.4472, 0.0000],
        [0.0000, 0.5000, 0.0000, 0.0000],
        [0.0000, 0.0000, 0.0000, 0.5774],
        [0.0000, 0.0000, 0.0000, 0.0000]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python\dgl\backend\pytorch\sparse.py:720: TypeError
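The LabelPropagation path fails one call site earlier in the same file (gspmm at python\dgl\backend\pytorch\sparse.py:720 instead of gsddmm at :731), but the mechanism is identical and reproduces without DGL at all: calling a no-argument context-manager factory with a keyword raises this exact TypeError. A self-contained demonstration:

    from contextlib import contextmanager

    @contextmanager
    def empty_context():                 # no-argument fallback
        yield

    with empty_context(enabled=False):   # TypeError: empty_context() got an
        pass                             # unexpected keyword argument 'enabled'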
The following test_label_prop parametrizations fail with the identical
traceback (TypeError: empty_context() got an unexpected keyword argument
'enabled', raised at python\dgl\backend\pytorch\sparse.py:720); only the
lhs_data values differ:
test_label_prop[True-True-True-sym-0.0-5]   test_label_prop[True-True-True-sym-0.5-3]
test_label_prop[True-True-True-sym-0.5-5]   test_label_prop[True-True-True-sym-1.0-3]
test_label_prop[True-True-True-sym-1.0-5]   test_label_prop[True-True-True-row-0.0-3]
test_label_prop[True-True-True-row-0.0-5]   test_label_prop[True-True-True-row-0.5-3]
test_label_prop[True-True-True-row-0.5-5]   test_label_prop[True-True-True-row-1.0-3]
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError __________________ test_label_prop[True-True-True-row-1.0-5] __________________ k = 5, alpha = 1.0, norm_type = 'row', clamp = True, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-sym-0.0-3] __________________ k = 3, alpha = 0.0, norm_type = 'sym', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.4472, 0.0000], [0.0000, 0.5774, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-sym-0.0-5] __________________ k = 5, alpha = 0.0, norm_type = 'sym', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5774, 0.0000], [0.0000, 0.4472, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5774], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-sym-0.5-3] __________________ k = 3, alpha = 0.5, norm_type = 'sym', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5774, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.4472], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-sym-0.5-5] __________________ k = 5, alpha = 0.5, norm_type = 'sym', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-sym-1.0-3] __________________ k = 3, alpha = 1.0, norm_type = 'sym', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.4472, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.7071], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-sym-1.0-5] __________________ k = 5, alpha = 1.0, norm_type = 'sym', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.4472, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5774], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-row-0.0-3] __________________ k = 3, alpha = 0.0, norm_type = 'row', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-row-0.0-5] __________________ k = 5, alpha = 0.0, norm_type = 'row', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-row-0.5-3] __________________ k = 3, alpha = 0.5, norm_type = 'row', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-row-0.5-5] __________________ k = 5, alpha = 0.5, norm_type = 'row', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-row-1.0-3] __________________ k = 3, alpha = 1.0, norm_type = 'row', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-True-False-row-1.0-5] __________________ k = 5, alpha = 1.0, norm_type = 'row', clamp = False, normalize = True reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-False-True-sym-0.0-3] __________________ k = 3, alpha = 0.0, norm_type = 'sym', clamp = True, normalize = False reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-False-True-sym-0.0-5] __________________ k = 5, alpha = 0.0, norm_type = 'sym', clamp = True, normalize = False reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5774, 0.0000], [0.0000, 0.4472, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-False-True-sym-0.5-3] __________________ k = 3, alpha = 0.5, norm_type = 'sym', clamp = True, normalize = False reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5774, 0.0000], [0.0000, 0.5774, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-False-True-sym-0.5-5] __________________ k = 5, alpha = 0.5, norm_type = 'sym', clamp = True, normalize = False reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.4472, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5774], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-False-True-sym-1.0-3] __________________ k = 3, alpha = 1.0, norm_type = 'sym', clamp = True, normalize = False reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.4472, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-False-True-sym-1.0-5] __________________ k = 5, alpha = 1.0, norm_type = 'sym', clamp = True, normalize = False reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.5774, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.4472], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-False-True-row-0.0-3] __________________ k = 3, alpha = 0.0, norm_type = 'row', clamp = True, normalize = False reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-False-True-row-0.0-5] __________________ k = 5, alpha = 0.0, norm_type = 'row', clamp = True, normalize = False reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-False-True-row-0.5-3] __________________ k = 3, alpha = 0.5, norm_type = 'row', clamp = True, normalize = False reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[True-False-True-row-0.5-5] __________________ k = 5, alpha = 0.5, norm_type = 'row', clamp = True, normalize = False reset = True @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
[The following 27 test_label_prop parametrizations fail identically at
python\dgl\backend\pytorch\sparse.py:720 with
TypeError: empty_context() got an unexpected keyword argument 'enabled'.
Their tracebacks, which differ only in the parameter combination and the
randomly generated lhs_data tensor, are omitted:]

test_label_prop[True-False-True-row-1.0-3]
test_label_prop[True-False-True-row-1.0-5]
test_label_prop[True-False-False-sym-0.0-3]
test_label_prop[True-False-False-sym-0.0-5]
test_label_prop[True-False-False-sym-0.5-3]
test_label_prop[True-False-False-sym-0.5-5]
test_label_prop[True-False-False-sym-1.0-3]
test_label_prop[True-False-False-sym-1.0-5]
test_label_prop[True-False-False-row-0.0-3]
test_label_prop[True-False-False-row-0.0-5]
test_label_prop[True-False-False-row-0.5-3]
test_label_prop[True-False-False-row-0.5-5]
test_label_prop[True-False-False-row-1.0-3]
test_label_prop[True-False-False-row-1.0-5]
test_label_prop[False-True-True-sym-0.0-3]
test_label_prop[False-True-True-sym-0.0-5]
test_label_prop[False-True-True-sym-0.5-3]
test_label_prop[False-True-True-sym-0.5-5]
test_label_prop[False-True-True-sym-1.0-3]
test_label_prop[False-True-True-sym-1.0-5]
test_label_prop[False-True-True-row-0.0-3]
test_label_prop[False-True-True-row-0.0-5]
test_label_prop[False-True-True-row-0.5-3]
test_label_prop[False-True-True-row-0.5-5]
test_label_prop[False-True-True-row-1.0-3]
test_label_prop[False-True-True-row-1.0-5]
test_label_prop[False-True-False-sym-0.0-3]
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-True-False-sym-0.0-5] _________________ k = 5, alpha = 0.0, norm_type = 'sym', clamp = False, normalize = True reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-True-False-sym-0.5-3] _________________ k = 3, alpha = 0.5, norm_type = 'sym', clamp = False, normalize = True reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5774, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-True-False-sym-0.5-5] _________________ k = 5, alpha = 0.5, norm_type = 'sym', clamp = False, normalize = True reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.4472, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5774], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-True-False-sym-1.0-3] _________________ k = 3, alpha = 1.0, norm_type = 'sym', clamp = False, normalize = True reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.4472], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-True-False-sym-1.0-5] _________________ k = 5, alpha = 1.0, norm_type = 'sym', clamp = False, normalize = True reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-True-False-row-0.0-3] _________________ k = 3, alpha = 0.0, norm_type = 'row', clamp = False, normalize = True reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-True-False-row-0.0-5] _________________ k = 5, alpha = 0.0, norm_type = 'row', clamp = False, normalize = True reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-True-False-row-0.5-3] _________________ k = 3, alpha = 0.5, norm_type = 'row', clamp = False, normalize = True reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-True-False-row-0.5-5] _________________ k = 5, alpha = 0.5, norm_type = 'row', clamp = False, normalize = True reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-True-False-row-1.0-3] _________________ k = 3, alpha = 1.0, norm_type = 'row', clamp = False, normalize = True reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-True-False-row-1.0-5] _________________ k = 5, alpha = 1.0, norm_type = 'row', clamp = False, normalize = True reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-sym-0.0-3] _________________ k = 3, alpha = 0.0, norm_type = 'sym', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5774], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-sym-0.0-5] _________________ k = 5, alpha = 0.0, norm_type = 'sym', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.4472], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-sym-0.5-3] _________________ k = 3, alpha = 0.5, norm_type = 'sym', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.7071, 0.0000], [0.0000, 0.4472, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-sym-0.5-5] _________________ k = 5, alpha = 0.5, norm_type = 'sym', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-sym-1.0-3] _________________ k = 3, alpha = 1.0, norm_type = 'sym', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5774, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-sym-1.0-5] _________________ k = 5, alpha = 1.0, norm_type = 'sym', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-row-0.0-3] _________________ k = 3, alpha = 0.0, norm_type = 'row', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-row-0.0-5] _________________ k = 5, alpha = 0.0, norm_type = 'row', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-row-0.5-3] _________________ k = 3, alpha = 0.5, norm_type = 'row', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-row-0.5-5] _________________ k = 5, alpha = 0.5, norm_type = 'row', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-row-1.0-3] _________________ k = 3, alpha = 1.0, norm_type = 'row', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError _________________ test_label_prop[False-False-True-row-1.0-5] _________________ k = 5, alpha = 1.0, norm_type = 'row', clamp = True, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-sym-0.0-3] _________________ k = 3, alpha = 0.0, norm_type = 'sym', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-sym-0.0-5] _________________ k = 5, alpha = 0.0, norm_type = 'sym', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.4472, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-sym-0.5-3] _________________ k = 3, alpha = 0.5, norm_type = 'sym', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5000, 0.0000], [0.0000, 0.4472, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-sym-0.5-5] _________________ k = 5, alpha = 0.5, norm_type = 'sym', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5774, 0.0000], [0.0000, 0.5000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.4472], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-sym-1.0-3] _________________ k = 3, alpha = 1.0, norm_type = 'sym', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5774, 0.0000], [0.0000, 0.4472, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.4472], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-sym-1.0-5] _________________ k = 5, alpha = 1.0, norm_type = 'sym', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.5774, 0.0000], [0.0000, 0.4472, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.5000], [0.0000, 0.0000, 0.0000, 0.0000]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-row-0.0-3] _________________ k = 3, alpha = 0.0, norm_type = 'row', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-row-0.0-5] _________________ k = 5, alpha = 0.0, norm_type = 'row', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-row-0.5-3] _________________ k = 3, alpha = 0.5, norm_type = 'row', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-row-0.5-5] _________________ k = 5, alpha = 0.5, norm_type = 'row', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-row-1.0-3] _________________ k = 3, alpha = 1.0, norm_type = 'row', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ________________ test_label_prop[False-False-False-row-1.0-5] _________________ k = 5, alpha = 1.0, norm_type = 'row', clamp = False, normalize = False reset = False @pytest.mark.parametrize('k', [3, 5]) @pytest.mark.parametrize('alpha', [0., 0.5, 1.]) @pytest.mark.parametrize('norm_type', ['sym', 'row']) @pytest.mark.parametrize('clamp', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('reset', [True, False]) def test_label_prop(k, alpha, norm_type, clamp, normalize, reset): dev = F.ctx() num_nodes = 5 num_edges = 20 num_classes = 4 g = dgl.rand_graph(num_nodes, num_edges).to(dev) labels = th.tensor([0, 2, 1, 3, 0]).long().to(dev) ml_labels = th.rand(num_nodes, num_classes).to(dev) > 0.7 mask = th.tensor([0, 1, 1, 1, 0]).bool().to(dev) model = nn.LabelPropagation(k, alpha, norm_type, clamp, normalize, reset) > model(g, labels, mask) tests\pytorch\test_nn.py:1601: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ c:\program files\python36\lib\site-packages\torch\nn\modules\module.py:1051: in _call_impl return forward_call(*input, **kwargs) python\dgl\nn\pytorch\utils.py:511: in forward g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h')) python\dgl\heterograph.py:4895: in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) python\dgl\core.py:357: in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) python\dgl\core.py:332: in invoke_gspmm z = op(graph, x) python\dgl\ops\spmm.py:189: in func return gspmm(g, 'copy_lhs', reduce_op, x, None) python\dgl\ops\spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[0., 0., 0., 0.], [0., 0., 1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.]]) rhs_data = None def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python\dgl\backend\pytorch\sparse.py:720: TypeError ============================== warnings summary =============================== python\dgl\backend\backend.py:1741 C:\Jenkins\workspace\dgl_PR-4648\python\dgl\backend\backend.py:1741: DeprecationWarning: invalid escape sequence \P """ tests/pytorch/test_dataloader.py::test_shadow[0] c:\program files\python36\lib\site-packages\numpy\matrixlib\defmatrix.py:69: PendingDeprecationWarning: the matrix subclass is not the recommended way to represent matrices or deal with linear algebra (see https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). Please adjust your code to use regular ndarray. 
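Every test_label_prop failure above, and the test_graph_conv/test_graph_conv0 failures in the summary below, stop at the same frame: python\dgl\backend\pytorch\sparse.py:720 enters `with autocast(enabled=False):`, but in this Python 3.6 / pre-1.12 PyTorch environment `autocast` has been bound to DGL's no-argument `empty_context` helper, which rejects the `enabled` keyword. The many parametrize combinations just replay that one bug. Below is a minimal sketch of the mechanism and of the kind of repair the commit message ("fix for pytorch < 1.12") suggests, namely letting the fallback swallow autocast-style arguments; this is an illustrative assumption, not DGL's actual patch:

    from contextlib import contextmanager

    # How the failing runs behave: a bare no-op context manager stands in
    # for autocast on PyTorch builds without a usable autocast...
    @contextmanager
    def empty_context():
        yield

    autocast = empty_context
    # autocast(enabled=False)   # raises the TypeError seen in the log

    # ...and a tolerant fallback that accepts and ignores autocast-style
    # arguments, so `with autocast(enabled=False):` degrades to a no-op.
    @contextmanager
    def empty_context_fixed(*args, **kwargs):
        yield

    autocast = empty_context_fixed
    with autocast(enabled=False):   # no longer raises
        pass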
============================== warnings summary ===============================
python\dgl\backend\backend.py:1741
  C:\Jenkins\workspace\dgl_PR-4648\python\dgl\backend\backend.py:1741: DeprecationWarning: invalid escape sequence \P
    """

tests/pytorch/test_dataloader.py::test_shadow[0]
  c:\program files\python36\lib\site-packages\numpy\matrixlib\defmatrix.py:69: PendingDeprecationWarning: the matrix subclass is not the recommended way to represent matrices or deal with linear algebra (see https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). Please adjust your code to use regular ndarray.
    return matrix(data, dtype=dtype, copy=False)

tests/pytorch/test_dataloader.py: 80 warnings
  C:\Jenkins\workspace\dgl_PR-4648\python\dgl\dataloading\dataloader.py:863: DGLWarning: Dataloader CPU affinity opt is not enabled, consider switching it on (see enable_cpu_affinity() or CPU best practices for DGL [https://docs.dgl.ai/tutorials/cpu/cpu_best_practises.html])
    dgl_warning(f'Dataloader CPU affinity opt is not enabled, consider switching it on '

tests/pytorch/test_dataloader.py: 48 warnings
  C:\Jenkins\workspace\dgl_PR-4648\python\dgl\dataloading\dataloader.py:83: DGLWarning: The current output_nodes are out of order even if set shuffle to False in Dataloader, the reason is that the current version of torch dose not support stable sort. Please update torch to 1.10.0 or higher to fix it.
    'The current output_nodes are out of order even if set shuffle '

tests/pytorch/test_nn.py: 11 warnings
tests/pytorch/test_pickle.py: 1 warning
  C:\Jenkins\workspace\dgl_PR-4648\python\dgl\heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
    dgl_warning('Recommend creating graphs by `dgl.graph(data)`'

tests/pytorch/test_nn.py::test_set_trans
  C:\Jenkins\workspace\dgl_PR-4648\python\dgl\nn\pytorch\glob.py:700: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
    lengths_x = th.tensor(lengths_x, dtype=th.int64, device=device)

tests/pytorch/test_nn.py::test_set_trans
  C:\Jenkins\workspace\dgl_PR-4648\python\dgl\nn\pytorch\glob.py:701: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
    lengths_mem = th.tensor(lengths_mem, dtype=th.int64, device=device)

tests/pytorch/test_nn.py::test_dense_cheb_conv[1]
tests/pytorch/test_nn.py::test_dense_cheb_conv[2]
  C:\Jenkins\workspace\dgl_PR-4648\python\dgl\heterograph.py:84: DGLWarning: Keyword arguments ['readonly'] are deprecated in v0.5, and can be safely
    ' removed in all cases.'.format(list(deprecate_kwargs.keys())))
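Of the warnings above, the UserWarning from glob.py:700-701 is the one actionable in code: it is PyTorch's standard complaint about `torch.tensor(sourceTensor)`. A minimal sketch of the rewrite the warning itself recommends, with invented sample values standing in for the real lengths; the CPU-affinity and stable-sort DGLWarnings, by contrast, only point at an opt-in knob (enable_cpu_affinity()) and at a torch upgrade:

    import torch as th

    lengths_x = th.tensor([3, 5, 2])  # hypothetical stand-in for the real data

    # What glob.py:700 does today; it works but emits the UserWarning:
    warned = th.tensor(lengths_x, dtype=th.int64)

    # The form the warning recommends; same values, no warning:
    quiet = lengths_x.clone().detach().to(dtype=th.int64)

    assert th.equal(warned, quiet)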
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
--- generated xml file: C:\Jenkins\workspace\dgl_PR-4648\pytest_backend.xml ---
============================ slowest 100 durations ============================
10.97s call tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-None-False]
10.86s call tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-None-False]
10.77s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor2-idtype0]
10.75s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor2-idtype1]
10.39s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-full-idtype0]
9.99s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-full-idtype1]
9.63s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor-idtype1]
9.55s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor2-idtype0]
9.43s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-full-idtype0]
9.33s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor-idtype1]
9.32s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-full-idtype1]
9.27s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor-idtype0]
9.20s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor2-idtype1]
9.18s call tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-cpu-idtype1]
9.17s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor-idtype0]
9.00s call tests/pytorch/test_dataloader.py::test_shadow[0]
7.32s call tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-cpu-idtype0]
6.69s call tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-cpu-idtype1]
6.26s call tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-cpu-idtype0]
3.16s call tests/pytorch/test_dataloader.py::test_shadow[4]
2.84s call tests/pytorch/test_dataloader.py::test_saint[node-4]
2.83s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-32-32]
2.82s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-32-16]
2.82s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-16-16]
2.81s call tests/pytorch/test_dataloader.py::test_saint[walk-4]
2.81s call tests/pytorch/test_dataloader.py::test_saint[edge-4]
2.65s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-32-16]
2.64s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-32-32]
2.46s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-32-16]
2.08s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-16-32]
2.08s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-32-32]
1.91s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-16-32]
1.89s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-16-16]
1.81s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-16-32]
1.73s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-16-16]
1.71s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-32-16]
1.56s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators0-32-32]
1.54s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-32-32]
1.48s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-16-32]
1.43s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-16-32]
1.35s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-16-16]
1.35s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-32-32]
1.30s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators1-16-16]
1.29s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators1-16-32]
1.26s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-16-16]
1.24s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators0-16-32]
1.21s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-32-16]
1.21s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-32-16]
1.20s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-16-32]
1.12s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators1-16-32]
1.08s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators0-16-32]
1.01s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators0-16-32]
0.96s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-16-16]
0.95s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators0-16-16]
0.94s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators0-32-16]
0.93s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators0-16-16]
0.93s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators1-16-32]
0.93s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators0-16-16]
0.93s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators0-32-32]
0.91s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators0-16-16]
0.85s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators0-32-32]
0.83s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators2-32-32]
0.82s call tests/pytorch/test_dataloader.py::test_saint[edge-0]
0.80s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators1-32-16]
0.79s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators1-16-32]
0.76s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators0-16-32]
0.76s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators0-32-32]
0.76s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators1-32-16]
0.75s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators1-16-32]
0.75s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators1-32-16]
0.75s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators0-32-16]
0.74s call tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators1-16-16]
0.74s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators0-32-32]
0.74s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators0-16-16]
0.73s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-32-16]
0.73s call tests/pytorch/test_dataloader.py::test_saint[walk-0]
0.69s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-32-32]
0.69s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators2-32-16]
0.69s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators2-32-16]
0.67s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-full-idtype1]
0.67s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-full-idtype0]
0.67s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators1-32-32]
0.65s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-full-idtype1]
0.65s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-full-idtype0]
0.64s call tests/pytorch/test_dataloader.py::test_saint[node-0]
0.64s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-full-idtype0]
0.63s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-16-32]
0.63s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-full-idtype1]
0.63s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-full-idtype0]
0.62s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-full-idtype0]
0.60s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-full-idtype1]
0.60s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-32-32]
0.59s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators1-16-32]
0.58s call tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators1-16-32]
0.58s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators1-32-16]
0.58s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators2-16-32]
0.58s call tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-16-16]
0.57s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-full-idtype0]
0.57s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators2-16-32]
0.57s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators0-32-16]
=========================== short test summary info ===========================
FAILED tests/pytorch/test_geometry.py::test_knn_cpu[True-euclidean-bruteforce-blas]
FAILED tests/pytorch/test_geometry.py::test_knn_cpu[True-euclidean-bruteforce]
FAILED tests/pytorch/test_geometry.py::test_knn_cpu[True-euclidean-kd-tree]
FAILED tests/pytorch/test_geometry.py::test_knn_cpu[True-cosine-bruteforce-blas]
FAILED tests/pytorch/test_geometry.py::test_knn_cpu[True-cosine-bruteforce]
FAILED tests/pytorch/test_geometry.py::test_knn_cpu[True-cosine-kd-tree] - Ty...
FAILED tests/pytorch/test_nn.py::test_graph_conv0[1] - TypeError: empty_conte...
FAILED tests/pytorch/test_nn.py::test_graph_conv0[2] - TypeError: empty_conte...
FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g6-idtype0] FAILED 
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g4-idtype1] FAILED 
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g2-idtype1] FAILED 
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g0-idtype1] FAILED 
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g6-idtype1] FAILED 
tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g4-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g5-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g5-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g6-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g6-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g7-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g7-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g0-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g0-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g1-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g1-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g2-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g2-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g3-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g3-idtype1] FAILED tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g4-idtype0] FAILED tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g4-idtype1] FAILED 
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g2-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g2-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g3-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g3-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g4-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g4-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g5-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g5-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g6-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g6-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g7-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g7-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g1-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g0-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g0-idtype1] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g1-idtype0] FAILED
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g1-idtype1] FAILED
FAILED tests/pytorch/test_nn.py::test_tagconv[1] - TypeError: empty_context()...
FAILED tests/pytorch/test_nn.py::test_tagconv[2] - TypeError: empty_context()...
FAILED tests/pytorch/test_nn.py::test_set2set - TypeError: empty_context() go...
FAILED tests/pytorch/test_nn.py::test_glob_att_pool - TypeError: empty_contex...
FAILED tests/pytorch/test_nn.py::test_simple_pool - TypeError: empty_context(...
FAILED tests/pytorch/test_nn.py::test_rgcn[1-idtype0] - TypeError: empty_cont...
FAILED tests/pytorch/test_nn.py::test_rgcn[1-idtype1] - TypeError: empty_cont...
FAILED tests/pytorch/test_nn.py::test_rgcn[8-idtype0] - TypeError: empty_cont...
FAILED tests/pytorch/test_nn.py::test_rgcn[8-idtype1] - TypeError: empty_cont...
FAILED tests/pytorch/test_nn.py::test_rgcn[32-idtype0] - TypeError: empty_con...
FAILED tests/pytorch/test_nn.py::test_rgcn[32-idtype1] - TypeError: empty_con...
FAILED tests/pytorch/test_nn.py::test_rgcn_default_nbasis[1-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_rgcn_default_nbasis[1-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_rgcn_default_nbasis[10-idtype0] - TypeE...
FAILED tests/pytorch/test_nn.py::test_rgcn_default_nbasis[10-idtype1] - TypeE...
FAILED tests/pytorch/test_nn.py::test_rgcn_default_nbasis[40-idtype0] - TypeE...
FAILED tests/pytorch/test_nn.py::test_rgcn_default_nbasis[40-idtype1] - TypeE...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g0-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g0-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g1-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g1-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g2-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g2-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g3-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g3-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g4-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g4-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g5-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g5-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g6-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-1-g6-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g0-idtype0] - TypeError: e...
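
From test_tagconv onward the log is pytest's short test summary, where each line is "FAILED <test id> - <reason>" truncated to the terminal width; that is why the shared reason survives only as fragments such as "TypeError: empty_context()..." and "TypeError: empty_context() go...". Those fragments fit a zero-argument context-manager helper being called with arguments it does not accept, which would knock out every parametrization during setup in exactly this blanket fashion. A minimal sketch of that failure mode follows; "empty_context" is taken from the truncated lines, while the keyword name "enabled" is invented for illustration, since the real call site is cut off in this log.

    from contextlib import contextmanager

    @contextmanager
    def empty_context():
        # No-op stand-in that accepts no arguments.
        yield

    try:
        # Hypothetical call; the actual arguments are not visible above.
        with empty_context(enabled=False):
            pass
    except TypeError as err:
        print(err)  # empty_context() got an unexpected keyword argument 'enabled'

Note that the error is raised by the call itself, before the with-protocol ever runs, which is why no test body gets far enough to produce a more specific failure.
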
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g0-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g1-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g1-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g2-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g2-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g3-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g3-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g4-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g4-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g5-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g5-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g6-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[1-5-g6-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g0-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g0-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g1-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g1-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g2-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g2-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g3-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g3-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g4-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g4-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g5-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g5-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g6-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-1-g6-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g0-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g0-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g1-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g1-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g2-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g2-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g3-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g3-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g4-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g4-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g5-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g5-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g6-idtype0] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv[4-5-g6-idtype1] - TypeError: e...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g0-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g0-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g1-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g1-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g2-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g2-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g3-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g3-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g4-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g4-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g5-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g5-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g6-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g6-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g0-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g0-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g1-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g1-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g2-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g2-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g3-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g3-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g4-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g4-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g5-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g5-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g6-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g6-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g0-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g0-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g1-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g1-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g2-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g2-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g3-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g3-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g4-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g4-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g5-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g5-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g6-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g6-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g0-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g0-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g1-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g1-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g2-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g2-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g3-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g3-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g4-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g4-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g5-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g5-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g6-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g6-idtype1] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g0-idtype0] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g0-idtype1] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g1-idtype0] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g1-idtype1] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g0-idtype0] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g0-idtype1] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g1-idtype0] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g1-idtype1] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g0-idtype0] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g0-idtype1] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g1-idtype0] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g1-idtype1] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g0-idtype0] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g0-idtype1] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g1-idtype0] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g1-idtype1] - TypeErr...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g2-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g2-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g3-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g3-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g4-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g4-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g5-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g5-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g2-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g2-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g3-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g3-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g4-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g4-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g5-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g5-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g2-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g2-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g3-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g3-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g4-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g4-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g5-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g5-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g2-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g2-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g3-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g3-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g4-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g4-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g5-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g5-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g2-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g2-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g3-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g3-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g4-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g4-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g5-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g5-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g2-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g2-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g3-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g3-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g4-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g4-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g5-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g5-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g2-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g2-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g3-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g3-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g4-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g4-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g5-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g5-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g0-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g0-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g1-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g1-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g2-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g2-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g3-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g3-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g4-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g4-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g5-idtype0] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g5-idtype1] - TypeError...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g0-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g0-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g1-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g1-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g0-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g0-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g1-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g1-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g0-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g0-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g1-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g1-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g0-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g0-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g1-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g1-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g0-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g0-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g1-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g1-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g0-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g0-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g1-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g1-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g0-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g0-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g1-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g1-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g0-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g0-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g1-idtype0] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g1-idtype1] - TypeEr...
FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g0-idtype0] - TypeError:...
FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g0-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g1-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g1-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g2-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g2-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g3-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g3-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g4-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g4-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g5-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g5-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g6-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g6-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g7-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[mean-g7-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g0-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g0-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g1-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g1-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g2-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g2-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g3-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g3-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g4-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g4-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g5-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g5-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g6-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g6-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g7-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[pool-g7-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g0-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g0-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g1-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g1-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g2-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g2-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g3-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g3-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g4-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g4-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g5-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g5-idtype1] - TypeError: ... 
FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g6-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g6-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g7-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[gcn-g7-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g0-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g0-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g1-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g1-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g2-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g2-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g3-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g3-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g4-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g4-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g5-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g5-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g6-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g6-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g7-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv[lstm-g7-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g0-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g0-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g1-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g1-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g2-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g2-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g0-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g0-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g1-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g1-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g2-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g2-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g0-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g0-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g1-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g1-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g2-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g2-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g0-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g0-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g1-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g1-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g2-idtype0] - TypeE... 
FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g2-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g0-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g0-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g1-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g1-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g2-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g2-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g0-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g0-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g1-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g1-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g2-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g2-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g0-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g0-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g1-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g1-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g2-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g2-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g0-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g0-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g1-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g1-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g2-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g2-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_sage_conv2[1-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_sage_conv2[1-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_sage_conv2[2-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_sage_conv2[2-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g0-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g0-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g1-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g1-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g2-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g2-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g3-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g3-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g4-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g4-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g5-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[1-g5-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g0-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g0-idtype1] - TypeError: emp... 
FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g1-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g1-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g2-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g2-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g3-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g3-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g4-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g4-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g5-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_sgc_conv[2-g5-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g0-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g0-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g1-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g1-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g2-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g2-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g3-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g3-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g4-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g4-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g5-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv[g5-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g0-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g0-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g1-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g1-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g2-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g2-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g3-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g3-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g4-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g4-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g5-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g5-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g0-idtype0] - Ty... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g0-idtype1] - Ty... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g1-idtype0] - Ty... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g1-idtype1] - Ty... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g2-idtype0] - Ty... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g2-idtype1] - Ty... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g3-idtype0] - Ty... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g3-idtype1] - Ty... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g4-idtype0] - Ty... 
FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g4-idtype1] - Ty... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g5-idtype0] - Ty... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g5-idtype1] - Ty... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g0-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g0-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g1-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g1-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g2-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g2-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g3-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g3-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g4-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g4-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g5-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g5-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g0-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g0-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g1-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g1-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g2-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g2-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g3-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g3-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g4-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g4-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g5-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_sgconv_e_weight[g5-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g0-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g0-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g1-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g1-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g2-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g2-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g3-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g3-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g4-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g4-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g5-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_tagconv_e_weight[g5-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g0-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g0-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g1-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g1-idtype1] - TypeError: ... 
FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g2-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g2-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g3-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g3-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g4-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g4-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g5-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g5-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g6-idtype0] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[mean-g6-idtype1] - TypeError: ... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g0-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g0-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g1-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g1-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g2-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g2-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g3-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g3-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g4-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g4-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g5-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g5-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g6-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[max-g6-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g0-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g0-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g1-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g1-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g2-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g2-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g3-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g3-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g4-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g4-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g5-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g5-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g6-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gin_conv[sum-g6-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gine_conv[g0-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g0-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g1-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g1-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g2-idtype0] - TypeError: empt... 
FAILED tests/pytorch/test_nn.py::test_gine_conv[g2-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g3-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g3-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g4-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g4-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g5-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g5-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g6-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g6-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g7-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gine_conv[g7-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g0-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g0-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g1-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g1-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[max-g0-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[max-g0-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[max-g1-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[max-g1-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g0-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g0-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g1-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g1-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g0-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g0-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g1-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g1-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g2-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g2-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g3-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g3-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g4-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g5-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g5-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g6-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv[g6-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_agnn_conv_bi[g0-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_agnn_conv_bi[g0-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_agnn_conv_bi[g1-idtype0] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_agnn_conv_bi[g1-idtype1] - TypeError: e... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g0-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g0-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g1-idtype0] - TypeErro... 
FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g1-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g2-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g2-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g3-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g3-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g4-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g4-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g5-idtype0] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv[g5-idtype1] - TypeErro... FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g0-idtype0] FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g0-idtype1] FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g1-idtype0] FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g1-idtype1] FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g2-idtype0] FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g2-idtype1] FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g3-idtype0] FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g3-idtype1] FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g4-idtype0] FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g4-idtype1] FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g5-idtype0] FAILED tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g5-idtype1] FAILED tests/pytorch/test_nn.py::test_nn_conv[g0-idtype0] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g0-idtype1] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g1-idtype0] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g1-idtype1] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g2-idtype0] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g2-idtype1] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g3-idtype0] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g3-idtype1] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g4-idtype0] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g4-idtype1] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g5-idtype0] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g5-idtype1] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g6-idtype0] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv[g6-idtype1] - TypeError: empty_... FAILED tests/pytorch/test_nn.py::test_nn_conv_bi[g0-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_nn_conv_bi[g0-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_nn_conv_bi[g1-idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_nn_conv_bi[g1-idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_gmm_conv[g0-idtype0] - TypeError: empty... FAILED tests/pytorch/test_nn.py::test_gmm_conv[g0-idtype1] - TypeError: empty... FAILED tests/pytorch/test_nn.py::test_gmm_conv[g1-idtype0] - TypeError: empty... FAILED tests/pytorch/test_nn.py::test_gmm_conv[g1-idtype1] - TypeError: empty... FAILED tests/pytorch/test_nn.py::test_gmm_conv[g2-idtype0] - TypeError: empty... 
FAILED tests/pytorch/test_nn.py::test_gmm_conv[g2-idtype1] - TypeError: empty... FAILED tests/pytorch/test_nn.py::test_gmm_conv[g3-idtype0] - TypeError: empty... FAILED tests/pytorch/test_nn.py::test_gmm_conv[g3-idtype1] - TypeError: empty... FAILED tests/pytorch/test_nn.py::test_gmm_conv[g4-idtype0] - TypeError: empty... FAILED tests/pytorch/test_nn.py::test_gmm_conv[g4-idtype1] - TypeError: empty... FAILED tests/pytorch/test_nn.py::test_gmm_conv[g5-idtype0] - TypeError: empty... FAILED tests/pytorch/test_nn.py::test_gmm_conv[g5-idtype1] - TypeError: empty... FAILED tests/pytorch/test_nn.py::test_gmm_conv_bi[g0-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_gmm_conv_bi[g0-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_gmm_conv_bi[g1-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_gmm_conv_bi[g1-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-both-idtype1] - T... 
FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-right-idtype0] - ... 
FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-both-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-both-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-right-idtype0] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-right-idtype1] - ... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-none-idtype0] - T... FAILED tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-none-idtype1] - T... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g0-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g0-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g1-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g1-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g2-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g2-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g3-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g3-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g4-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g4-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g5-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g5-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g6-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g6-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g7-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g7-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g8-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g8-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g9-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[1-g9-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g0-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g0-idtype1] - TypeErr... 
FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g1-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g1-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g2-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g2-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g3-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g3-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g4-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g4-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g5-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g5-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g6-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g6-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g7-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g7-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g8-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g8-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g9-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_dense_sage_conv[2-g9-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g0-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g0-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g1-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g1-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g2-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g2-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g3-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g3-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g4-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g4-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g5-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g5-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g6-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[1-g6-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g0-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g0-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g1-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g1-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g2-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g2-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g3-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g3-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g4-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g4-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g5-idtype0] - TypeError: em... 
FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g5-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g6-idtype0] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv[2-g6-idtype1] - TypeError: em... FAILED tests/pytorch/test_nn.py::test_edge_conv_bi[1-g0-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_edge_conv_bi[1-g0-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_edge_conv_bi[1-g1-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_edge_conv_bi[1-g1-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_edge_conv_bi[2-g0-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_edge_conv_bi[2-g0-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_edge_conv_bi[2-g1-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_edge_conv_bi[2-g1-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g0-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g0-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g1-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g1-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g2-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g2-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g3-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g3-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g4-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g4-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g5-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g5-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g6-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g6-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g0-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g0-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g1-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g1-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g2-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g2-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g3-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g3-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g4-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g4-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g5-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g5-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g6-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g6-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g0-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g0-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g1-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g1-idtype1] - TypeError... 
FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g2-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g2-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g3-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g3-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g4-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g4-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g5-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g5-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g6-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g6-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g0-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g0-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g1-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g1-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g2-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g2-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g3-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g3-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g4-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g4-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g5-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g5-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g6-idtype0] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g6-idtype1] - TypeError... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g0-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g0-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g1-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g1-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g0-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g0-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g1-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g1-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g0-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g0-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g1-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g1-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g0-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g0-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g1-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g1-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_dense_cheb_conv[1] - TypeError: empty_c... FAILED tests/pytorch/test_nn.py::test_dense_cheb_conv[2] - TypeError: empty_c... FAILED tests/pytorch/test_nn.py::test_sequential - TypeError: empty_context()... 
FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g0-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g0-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g1-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g1-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g2-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g2-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g3-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g3-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g4-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g4-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g5-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g5-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g6-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g6-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g7-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[1-g7-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g0-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g0-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g1-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g1-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g2-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g2-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g3-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g3-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g4-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g4-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g5-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g5-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g6-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g6-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g7-idtype0] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_cf_conv[3-g7-idtype1] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-sum-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-sum-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-max-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-max-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-min-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-min-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-mean-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-mean-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-stack-idtype0] - Type... FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-stack-idtype1] - Type... FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-myagg-idtype0] - Type... 
FAILED tests/pytorch/test_nn.py::test_hetero_conv[False-myagg-idtype1] - Type... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-sum-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-sum-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-max-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-max-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-min-idtype0] - TypeErr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-min-idtype1] - TypeErr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-mean-idtype0] - TypeEr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-mean-idtype1] - TypeEr... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-stack-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-stack-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-myagg-idtype0] - TypeE... FAILED tests/pytorch/test_nn.py::test_hetero_conv[True-myagg-idtype1] - TypeE... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g0-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g0-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g1-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g1-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g2-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g2-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g3-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g3-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g4-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g4-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g5-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[1-g5-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g0-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g0-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g1-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g1-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g2-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g2-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g3-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g3-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g4-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g4-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g5-idtype0] - TypeError:... FAILED tests/pytorch/test_nn.py::test_gnnexplainer[2-g5-idtype1] - TypeError:... FAILED tests/pytorch/test_nn.py::test_twirls - TypeError: empty_context() got... FAILED tests/pytorch/test_nn.py::test_hgt[1-4-idtype0] - TypeError: empty_con... FAILED tests/pytorch/test_nn.py::test_hgt[1-4-idtype1] - TypeError: empty_con... FAILED tests/pytorch/test_nn.py::test_group_rev_res[idtype0] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_group_rev_res[idtype1] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[16-16-16-16] - TypeError: emp... 
FAILED tests/pytorch/test_nn.py::test_egnn_conv[16-16-16-32] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[16-16-32-16] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[16-16-32-32] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[16-32-16-16] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[16-32-16-32] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[16-32-32-16] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[16-32-32-32] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[10-16-16-16] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[10-16-16-32] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[10-16-32-16] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[10-16-32-32] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[10-32-16-16] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[10-32-16-32] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[10-32-32-16] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[10-32-32-32] - TypeError: emp... FAILED tests/pytorch/test_nn.py::test_egnn_conv[0-16-16-16] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_egnn_conv[0-16-16-32] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_egnn_conv[0-16-32-16] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_egnn_conv[0-16-32-32] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_egnn_conv[0-32-16-16] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_egnn_conv[0-32-16-32] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_egnn_conv[0-32-32-16] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_egnn_conv[0-32-32-32] - TypeError: empt... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.0-3] - ... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.0-5] - ... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.5-3] - ... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.5-5] - ... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-1.0-3] - ... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-1.0-5] - ... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.0-3] - ... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.0-5] - ... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.5-3] - ... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.5-5] - ... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-1.0-3] - ... FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-1.0-5] - ... 
FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.5-5] FAILED 
tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-1.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.0-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.5-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.5-5] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-1.0-3] FAILED tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-1.0-5]
== 1811 failed, 1812 passed, 355 skipped, 146 warnings in 928.17s (0:15:28) ===
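The test_nn.py failures above that still show their message all end in the same truncated "TypeError: empty_context() got...", the classic symptom of call sites passing a keyword argument that a helper's signature only accepts on some PyTorch builds, which is why one mismatch fans out into 1811 parametrized failures. A minimal sketch of that failure class and the usual version guard follows; empty_context here is a hypothetical stand-in written for illustration, and the 1.12 threshold is an assumption, not DGL's actual code:

import contextlib
import torch

# torch.__version__ may carry a local suffix, e.g. "1.12.1+cu113"
_TORCH_VER = tuple(int(v) for v in torch.__version__.split("+")[0].split(".")[:2])

if _TORCH_VER >= (1, 12):
    def empty_context(**kwargs):
        # newer builds: hand the options to a real context manager
        return torch.autocast("cpu", **kwargs)
else:
    def empty_context(**kwargs):
        # pre-1.12 fallback: accept and ignore keywords the old API lacks,
        # the usual cause of the truncated "TypeError: empty_context()
        # got ..." lines above
        return contextlib.nullcontext()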
Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c'
Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181'
Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628'
Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f'
Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361'
Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest'
Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop'
Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv'
Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'...
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] stage
[Pipeline] { (Torch CPU (Win64) Example test)
Stage "Torch CPU (Win64) Example test" skipped due to earlier failure(s)
[Pipeline] }
[Pipeline] // stage
Post stage
[Pipeline] cleanWs
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is disabled by the job configuration...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'...
[WS-CLEANUP] done
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'...
[Pipeline] // stage
[Pipeline] }
Failed in branch Torch CPU (Win64)
Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e'
Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2'
Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242'
Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef'
Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'...
Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5'
Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890'
Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'...
Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95'
Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838'
Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack'
Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core'
Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang'
Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'...
Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642'
Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7'
Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762'
Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c'
Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020'
[Pipeline] unstash
[Pipeline] echo
Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-gpu-linux
[Pipeline] timeout
Timeout set to expire in 30 min
[Pipeline] {
[Pipeline] sh
+ bash tests/scripts/task_unit_test.sh tensorflow gpu
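The pytest session this script launches names every case with bracketed suffixes such as [idtype0] or [sum-g3-idtype1]. These are pytest's generated parametrize ids: one token per argument, falling back to the argument name plus an index when a value has no readable string form, and stacked decorators multiply, which is how a single function like test_label_prop fans out into the dozens of ids above. A self-contained sketch of the mechanism, with an IdType class invented for illustration rather than taken from DGL's tests:

import pytest

class IdType:
    # an opaque value: pytest cannot render it, so ids fall back to
    # the argument name plus an index ("idtype0", "idtype1", ...)
    def __init__(self, bits):
        self.bits = bits

@pytest.mark.parametrize("idtype", [IdType(32), IdType(64)])
def test_example(idtype):
    # collected as test_example[idtype0] and test_example[idtype1]
    assert idtype.bits in (32, 64)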
Requirement already satisfied: pytest in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (7.1.2)
Collecting psutil
Downloading psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (281 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 281.3/281.3 kB 9.8 MB/s eta 0:00:00
Collecting pyyaml
Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 596.3/596.3 kB 38.5 MB/s eta 0:00:00
Collecting pydantic
Downloading pydantic-1.10.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (11.8 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 11.8/11.8 MB 116.7 MB/s eta 0:00:00
Collecting pandas
Using cached pandas-1.1.5-cp37-cp37m-manylinux1_x86_64.whl (9.5 MB)
Collecting rdflib
Downloading rdflib-6.2.0-py3-none-any.whl (500 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 500.3/500.3 kB 66.1 MB/s eta 0:00:00
Collecting ogb
Downloading ogb-1.3.4-py3-none-any.whl (78 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 78.6/78.6 kB 17.8 MB/s eta 0:00:00
Requirement already satisfied: attrs>=19.2.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (22.1.0)
Requirement already satisfied: pluggy<2.0,>=0.12 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (1.0.0)
Requirement already satisfied: py>=1.8.2 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (1.11.0)
Requirement already satisfied: importlib-metadata>=0.12 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (4.12.0)
Requirement already satisfied: iniconfig in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (1.1.1)
Requirement already satisfied: packaging in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (21.3)
Requirement already satisfied: tomli>=1.0.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (2.0.1)
Requirement already satisfied: typing-extensions>=4.1.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pydantic) (4.3.0)
Requirement already satisfied: numpy>=1.15.4 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pandas) (1.18.5)
Collecting pytz>=2017.2
Using cached pytz-2022.2.1-py2.py3-none-any.whl (500 kB)
Requirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pandas) (2.8.2)
Requirement already satisfied: setuptools in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from rdflib) (61.2.0)
Requirement already satisfied: pyparsing in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from rdflib) (3.0.9)
Collecting isodate
Downloading isodate-0.6.1-py2.py3-none-any.whl (41 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 41.7/41.7 kB 9.0 MB/s eta 0:00:00
Collecting scikit-learn>=0.20.0
Using cached scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (24.8 MB)
Requirement already satisfied: six>=1.12.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from ogb) (1.16.0)
Requirement already satisfied: tqdm>=4.29.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from ogb) (4.64.0)
Requirement already satisfied: urllib3>=1.24.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from ogb) (1.26.11)
Collecting torch>=1.6.0
Downloading torch-1.12.1-cp37-cp37m-manylinux1_x86_64.whl (776.3 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 776.3/776.3 MB 4.3 MB/s eta 0:00:00
Collecting outdated>=0.2.0
Using cached outdated-0.2.1-py3-none-any.whl (7.5 kB)
Requirement already satisfied: zipp>=0.5 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from importlib-metadata>=0.12->pytest) (3.8.1)
Collecting littleutils
Using cached littleutils-0.2.2-py3-none-any.whl
Requirement already satisfied: requests in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from outdated>=0.2.0->ogb) (2.28.1)
Collecting threadpoolctl>=2.0.0
Using cached threadpoolctl-3.1.0-py3-none-any.whl (14 kB)
Requirement already satisfied: scipy>=1.1.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.4.1)
Requirement already satisfied: joblib>=0.11 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.1.0)
Requirement already satisfied: charset-normalizer<3,>=2 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2.1.0)
Requirement already satisfied: idna<4,>=2.5 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (3.3)
Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2022.6.15)
Installing collected packages: pytz, littleutils, torch, threadpoolctl, pyyaml, pydantic, psutil, isodate, scikit-learn, rdflib, pandas, outdated, ogb
Successfully installed isodate-0.6.1 littleutils-0.2.2 ogb-1.3.4 outdated-0.2.1 pandas-1.1.5 psutil-5.9.2 pydantic-1.10.2 pytz-2022.2.1 pyyaml-6.0 rdflib-6.2.0 scikit-learn-1.0.2 threadpoolctl-3.1.0 torch-1.12.1
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
============================= test session starts ==============================
platform linux -- Python 3.7.0, pytest-7.1.2, pluggy-1.0.0 -- /opt/conda/envs/tensorflow-ci/bin/python3
cachedir: .pytest_cache
rootdir: /root/jenkins/workspace/dgl_PR-4648
collecting ...
collected 2458 items tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[idtype0] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[idtype1] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[idtype0] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[idtype1] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype0] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype1] SKIPPED [ 0%] tests/compute/test_backend.py::test_set_default_backend PASSED [ 0%] tests/compute/test_basics.py::test_compatible PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_getter[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_getter[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_update_routines[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_update_routines[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_update_all_0deg[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_update_all_0deg[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_dynamic_addition PASSED [ 0%] tests/compute/test_basics.py::test_repr[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_repr[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_local_var[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_local_var[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_issue_1088[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_issue_1088[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_issue_2484[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_issue_2484[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch1[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch1[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[idtype0] SKIPPED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[idtype1] SKIPPED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[idtype1] PASSED [ 1%] 
tests/compute/test_batched_graph.py::test_batch_send_and_recv[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_send_and_recv[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_propagate[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_propagate[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype0-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype0-gs1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype1-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype1-gs1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_unbatch2[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_unbatch2[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[idtype0] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[idtype1] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_netypes SKIPPED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype0] PASSED [ 3%] 
tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype1-idtype1] PASSED [ 4%] tests/compute/test_data.py::test_minigc SKIPPED (Datasets don't need...) [ 4%] tests/compute/test_data.py::test_gin SKIPPED (Datasets don't need to...) [ 4%] tests/compute/test_data.py::test_fraud SKIPPED (Datasets don't need ...) [ 5%] tests/compute/test_data.py::test_fakenews SKIPPED (Datasets don't ne...) [ 5%] tests/compute/test_data.py::test_tudataset_regression SKIPPED (Datas...) [ 5%] tests/compute/test_data.py::test_data_hash SKIPPED (Datasets don't n...) [ 5%] tests/compute/test_data.py::test_citation_graph SKIPPED (Datasets do...) [ 5%] tests/compute/test_data.py::test_gnn_benchmark SKIPPED (Datasets don...) [ 5%] tests/compute/test_data.py::test_reddit SKIPPED (Datasets don't need...) [ 5%] tests/compute/test_data.py::test_explain_syn SKIPPED (Datasets don't...) [ 5%] tests/compute/test_data.py::test_wiki_cs SKIPPED (Datasets don't nee...) [ 5%] tests/compute/test_data.py::test_yelp SKIPPED (Dataset too large to ...) [ 5%] tests/compute/test_data.py::test_flickr SKIPPED (Datasets don't need...) [ 5%] tests/compute/test_data.py::test_extract_archive SKIPPED (Datasets d...) [ 5%] tests/compute/test_data.py::test_csvdataset SKIPPED (Datasets don't ...) [ 5%] tests/compute/test_data.py::test_add_nodepred_split SKIPPED (Dataset...) 
[ 5%] tests/compute/test_data.py::test_as_nodepred1 SKIPPED (Datasets don'...) [ 5%] tests/compute/test_data.py::test_as_nodepred2 SKIPPED (Datasets don'...) [ 5%] tests/compute/test_data.py::test_as_nodepred_ogb SKIPPED (ogb only s...) [ 5%] tests/compute/test_data.py::test_as_linkpred SKIPPED (Datasets don't...) [ 5%] tests/compute/test_data.py::test_as_linkpred_ogb SKIPPED (ogb only s...) [ 5%] tests/compute/test_data.py::test_as_nodepred_csvdataset SKIPPED (Dat...) [ 5%] tests/compute/test_data.py::test_as_graphpred SKIPPED (Datasets don'...) [ 5%] tests/compute/test_data.py::test_as_graphpred_reprocess SKIPPED (Dat...) [ 5%] tests/compute/test_data.py::test_as_graphpred_ogb SKIPPED (ogb only ...) [ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[idtype0] PASSED [ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[idtype1] PASSED [ 5%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax_unidirectional SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype0-src-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype0-dst-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype1-src-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype1-dst-g0] SKIPPED [ 6%] tests/compute/test_ffi.py::test_cython PASSED [ 6%] tests/compute/test_ffi.py::test_callback[1] PASSED [ 6%] tests/compute/test_ffi.py::test_callback[2.3] PASSED [ 6%] tests/compute/test_ffi.py::test_callback_thread[1] PASSED [ 6%] tests/compute/test_ffi.py::test_callback_thread[2.3] PASSED [ 6%] tests/compute/test_filter.py::test_graph_filter PASSED [ 6%] tests/compute/test_filter.py::test_array_filter[idtype0] PASSED [ 6%] tests/compute/test_filter.py::test_array_filter[idtype1] PASSED [ 6%] tests/compute/test_frame.py::test_column_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_plain PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_dtype PASSED [ 6%] tests/compute/test_generators.py::test_rand_graph SKIPPED (GPU rando...) 
[ 6%] tests/compute/test_graph.py::test_query PASSED [ 6%] tests/compute/test_graph.py::test_mutation PASSED [ 6%] tests/compute/test_graph.py::test_scipy_adjmat PASSED [ 6%] tests/compute/test_graph.py::test_incmat PASSED [ 6%] tests/compute/test_graph.py::test_find_edges PASSED [ 6%] tests/compute/test_graph.py::test_ismultigraph PASSED [ 6%] tests/compute/test_graph.py::test_hypersparse_query PASSED [ 6%] tests/compute/test_graph.py::test_empty_data_initialized PASSED [ 7%] tests/compute/test_graph.py::test_is_sorted PASSED [ 7%] tests/compute/test_graph.py::test_default_types PASSED [ 7%] tests/compute/test_graph.py::test_formats PASSED [ 7%] tests/compute/test_heterograph.py::test_create[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_create[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_create2 PASSED [ 7%] tests/compute/test_heterograph.py::test_query[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_query[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_empty_query[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_empty_query[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_adj[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_adj[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_inc[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_inc[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_view[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_view[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_view1[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_view1[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_flatten[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_flatten[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device2[g0-idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device2[g0-idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_device2[g1-idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_device2[g1-idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_pin_memory_[idtype0] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_pin_memory_[idtype1] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_convert_bound[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert_bound[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo_zero_nodes[idtype0] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_to_homo_zero_nodes[idtype1] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_to_homo2[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo2[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_invertible_conversion[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_invertible_conversion[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_metagraph_reachable[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_metagraph_reachable[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph_mask[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph_mask[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph[idtype0] 
PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_apply[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_apply[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_level2[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_level2[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_more_nnz[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_more_nnz[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_updates[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_updates[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_backward[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_backward[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_empty_heterograph[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_empty_heterograph[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_types_in_function[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_types_in_function[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_dtype_cast[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_dtype_cast[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_float_cast PASSED [ 9%] tests/compute/test_heterograph.py::test_format[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_format[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_edges_order[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_edges_order[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_reverse[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_reverse[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_clone[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_clone[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_edges[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_edges[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_nodes[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_nodes[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_edges[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_edges[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_nodes[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_nodes[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame_device[idtype0] SKIPPED [ 10%] tests/compute/test_heterograph.py::test_frame_device[idtype1] SKIPPED [ 10%] 
tests/compute/test_heterograph.py::test_create_block[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_create_block[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[coo-idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[coo-idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csr-idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csr-idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csc-idtype0] PASSED [ 11%] tests/compute/test_heterograph.py::test_adj_sparse[csc-idtype1] PASSED [ 11%] tests/compute/test_heterograph.py::test_forking_pickler PASSED [ 11%] tests/compute/test_index.py::test_dlpack SKIPPED (TF doesn't support...) [ 11%] tests/compute/test_kernel.py::test_copy_src_reduce PASSED [ 11%] tests/compute/test_kernel.py::test_copy_edge_reduce PASSED [ 11%] tests/compute/test_kernel.py::test_all_binary_builtins PASSED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-idtype0] PASSED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-idtype1] PASSED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[idtype0] PASSED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[idtype1] PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_id PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_remainder PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_remainder PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_range PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_range PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_support PASSED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype0] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype1] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype0] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype1] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype0] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype1] SKIPPED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[idtype0] PASSED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_index PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph_index PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g0-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g0-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g1-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g1-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g5-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g5-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g6-idtype0] SKIPPED [ 12%] 
tests/compute/test_pickle.py::test_pickling_graph[g6-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g11-idtype0] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g11-idtype1] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g12-idtype0] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g12-idtype1] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_batched_heterograph SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_subgraph SKIPPED (GPU ed...) [ 13%] tests/compute/test_pickle.py::test_pickling_is_pinned[idtype0] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_is_pinned[idtype1] SKIPPED [ 13%] tests/compute/test_pin_memory.py::test_pin_unpin PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[idtype0] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[idtype1] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[idtype0] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[idtype1] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype0] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype1] SKIPPED [ 13%] tests/compute/test_random.py::test_random_choice SKIPPED (GPU random...) 
[ 13%] tests/compute/test_readout.py::test_sum_case1[idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_sum_case1[idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g2-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g2-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g3-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g3-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g4-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g4-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-idtype1] PASSED 
[ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype0] PASSED [ 17%] 
tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g0-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g0-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g1-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g1-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g2-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g2-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g3-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g3-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g4-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g4-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g5-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g5-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g6-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g6-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g0-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g0-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g1-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g1-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g2-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g2-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g4-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g4-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g5-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g5-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g0-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g0-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g1-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g1-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g2-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g2-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g3-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g3-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g4-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g4-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g5-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g5-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g6-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g6-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g1-idtype0] PASSED [ 18%] 
tests/compute/test_readout.py::test_broadcast[g1-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g2-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g2-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g3-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g3-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g4-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g4-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_edge_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_and_edge_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_node_and_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_frame[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_node_frame[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_edge_frame[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_edge_frame[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_issue1287[idtype0] PASSED [ 20%] tests/compute/test_removal.py::test_issue1287[idtype1] PASSED [ 20%] tests/compute/test_sampler.py::test_create_full PASSED [ 20%] tests/compute/test_sampler.py::test_1neighbor_sampler_all PASSED [ 20%] tests/compute/test_sampler.py::test_1neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_prefetch_neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_10neighbor_sampler_all PASSED [ 20%] tests/compute/test_sampler.py::test_10neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_layer_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler SKIPPED [ 20%] tests/compute/test_sampler.py::test_setseed PASSED [ 20%] tests/compute/test_sampler.py::test_negative_sampler SKIPPED (TF doe...) [ 20%] tests/compute/test_sampling.py::test_non_uniform_random_walk[True] SKIPPED [ 20%] tests/compute/test_sampling.py::test_non_uniform_random_walk[False] PASSED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[True] PASSED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[False] PASSED [ 20%] tests/compute/test_sampling.py::test_node2vec SKIPPED (GPU random wa...) [ 20%] tests/compute/test_sampling.py::test_pack_traces SKIPPED (GPU pack t...) 
[ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[True] PASSED [ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[False] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_noprob PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_prob PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_outedge PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk SKIPPED (...) [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk_outedge SKIPPED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_with_0deg PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_biased_homogeneous SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_biased_bipartite SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-coo] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-coo] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-coo] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-coo] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int64] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int64] PASSED [ 21%] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64] PASSED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_feature[True] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_feature[False] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_without_feature[True] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_without_feature[False] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_labels[True] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_labels[False] SKIPPED [ 22%] 
tests/compute/test_serialize.py::test_serialize_tensors PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_empty_dict PASSED [ 22%] tests/compute/test_serialize.py::test_load_old_files1 PASSED [ 22%] tests/compute/test_serialize.py::test_load_old_files2 PASSED [ 22%] tests/compute/test_serialize.py::test_deserialize_old_heterograph_file PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_heterograph SKIPPED [ 22%] tests/compute/test_serialize.py::test_serialize_heterograph_s3 SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_single_process[idtype0] SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_single_process[idtype1] SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_multi_process[idtype0] SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_multi_process[idtype1] SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_copy_from_gpu SKIPPED (Not su...) [ 22%] tests/compute/test_sort.py::test_sort_with_tag[idtype0] SKIPPED (GPU...) [ 22%] tests/compute/test_sort.py::test_sort_with_tag[idtype1] SKIPPED (GPU...) [ 22%] tests/compute/test_sort.py::test_sort_with_tag_bipartite[idtype0] SKIPPED [ 22%] tests/compute/test_sort.py::test_sort_with_tag_bipartite[idtype1] SKIPPED [ 22%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g0] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g1] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp4-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp4-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp5-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp5-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp0-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp0-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp1-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp1-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp2-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp2-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp4-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp4-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp5-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp5-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp0-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp0-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp1-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp2-g0] PASSED [ 24%] 
tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp2-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp3-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp3-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp4-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp4-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp5-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp5-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp0-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp0-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp1-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp2-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp2-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp4-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp4-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp5-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp5-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp0-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp0-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp2-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp2-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp3-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp3-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp4-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp4-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp5-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp5-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp0-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp0-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp1-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp2-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp2-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp3-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp3-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp4-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp4-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp5-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp5-g1] PASSED [ 25%] 
tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp0-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp0-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp1-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp2-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp3-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp3-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp4-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp4-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp5-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp5-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp0-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp0-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp1-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp1-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp2-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp3-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp3-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp4-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp4-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp5-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp5-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp0-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp0-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp2-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp2-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp4-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp4-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp5-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp5-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp0-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp1-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp1-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp2-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp2-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp3-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp3-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp4-g0] PASSED [ 27%] 
tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp4-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp5-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp5-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp0-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp1-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp1-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp2-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp2-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp3-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp3-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp4-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp5-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp5-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp0-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp0-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp1-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp1-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp2-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp2-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp3-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp3-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp4-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp5-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp5-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp0-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp0-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp1-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp1-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp2-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp2-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp3-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp3-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp4-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp4-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp5-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp5-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp0-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp0-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp1-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp1-g1] PASSED [ 29%] 
tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp2-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp2-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp3-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp3-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp4-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp4-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp5-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp5-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp0-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp0-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp1-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp1-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp2-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp2-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp3-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp3-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp4-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp4-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp5-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp5-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp0-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp0-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp1-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp1-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp2-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp2-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp3-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp3-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp4-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp4-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp5-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp5-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp0-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp0-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp1-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp1-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp2-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp2-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp3-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp3-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp4-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp4-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp5-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp5-g1] PASSED [ 31%] 
tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp0-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp0-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp2-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp2-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp3-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp3-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp4-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp4-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp5-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp5-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp0-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp0-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp1-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp1-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp2-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp2-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp3-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp3-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp4-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp4-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp5-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp5-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp0-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp0-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp1-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp1-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp2-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp2-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp3-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp3-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp4-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp4-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp5-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp5-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp0-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp0-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp1-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp1-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp2-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp2-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp3-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp3-g1] PASSED [ 32%] 
tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp4-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp4-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp5-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp5-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp0-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp0-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp1-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp1-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp2-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp2-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp3-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp3-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp4-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp4-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp5-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp5-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp0-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp0-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp1-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp1-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp2-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp2-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp3-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp3-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp4-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp4-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp5-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp5-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp0-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp0-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp1-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp1-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp2-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp2-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp3-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp3-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp4-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp4-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp5-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp5-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp0-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp0-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp1-g0] PASSED [ 34%] 
tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp1-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp2-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp2-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp3-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp3-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp4-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp4-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp5-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp5-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp0-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp0-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp1-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp1-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp2-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp2-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp3-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp3-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp4-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp4-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp5-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp5-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp0-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp0-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp1-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp1-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp2-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp2-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp3-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp3-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp4-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp4-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp5-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp5-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp0-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp0-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp1-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp1-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp2-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp2-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp3-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp3-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp4-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp4-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp5-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp5-g1] PASSED [ 36%] 
tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp0-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp0-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp1-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp1-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp2-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp2-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp3-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp3-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp4-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp4-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp5-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp5-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp0-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp0-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp1-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp1-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp2-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp3-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp3-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp4-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp4-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp5-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp5-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp0-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp0-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp1-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp1-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp2-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp3-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp3-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp4-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp4-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp5-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp5-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp0-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp0-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp1-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp1-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp2-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp2-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp3-g0] PASSED [ 38%] 
tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp3-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp4-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp4-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp5-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp5-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp0-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp0-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp1-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp1-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp2-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp2-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp3-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp3-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp4-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp4-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp5-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp5-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp0-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp0-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp1-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp1-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp2-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp2-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp3-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp3-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp4-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp4-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp5-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp5-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp0-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp0-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp1-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp1-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp2-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp2-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp3-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp3-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp4-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp4-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp5-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp5-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp0-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp1-g0] PASSED [ 40%] 
tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp1-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp2-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp2-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp3-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp3-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp4-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp4-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp5-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp5-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp0-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp1-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp1-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp2-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp2-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp3-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp3-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp4-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp4-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp0-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp1-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp1-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp3-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp4-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp0-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp1-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp1-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp3-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp4-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp0-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp1-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp1-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp2-g0] PASSED [ 41%]
tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp2-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp2-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp3-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp2-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp2-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp3-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp3-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp4-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp4-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp3-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp4-g0] PASSED [ 43%]
tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp4-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp1-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp2-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp1-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp2-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp1-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp2-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp3-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp3-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp4-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp4-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp0-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp1-g0] PASSED [ 45%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp2-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp3-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp3-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp4-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp4-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp0-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp1-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp3-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp0-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp0-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp1-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp1-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp2-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp3-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp0-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp0-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp1-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp1-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp2-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp3-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp2-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp2-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp3-g0] PASSED [ 47%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp3-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp4-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp4-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp2-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp2-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp3-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp3-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp4-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp4-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp2-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp2-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp3-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp3-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp4-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp0-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp0-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp1-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp1-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp2-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp2-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp3-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp3-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp4-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp0-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp0-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp1-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp1-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp2-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp2-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp3-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp3-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp4-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp0-g0] PASSED [ 49%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp0-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp1-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp2-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp2-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp3-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp3-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp4-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp4-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp0-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp0-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp1-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp2-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp2-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp3-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp3-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp4-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp4-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp0-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp0-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp1-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp2-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp2-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp3-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp4-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp4-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp0-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp0-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp1-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp1-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp2-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp2-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp3-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp4-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp4-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp0-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp0-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp1-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp1-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp2-g0] PASSED [ 50%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp2-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp3-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp4-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp4-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp0-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp1-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp2-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp2-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp3-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp3-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp4-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp4-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp0-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp1-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp2-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp2-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp3-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp3-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp4-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp4-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp0-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp1-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp2-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp2-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp3-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp4-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp4-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp0-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp0-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp1-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp1-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp2-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp2-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp3-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp4-g0] PASSED [ 52%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp4-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp0-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp0-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp1-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp1-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp2-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp2-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp3-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp4-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp4-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp0-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp1-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp2-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp2-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp3-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp3-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp4-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp4-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp0-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp1-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp2-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp2-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp3-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp3-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp4-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp4-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp0-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp1-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp1-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp3-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp3-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp4-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp4-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp0-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp0-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp1-g0] PASSED [ 54%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp1-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp3-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp3-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp4-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp4-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp0-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp0-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp1-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp1-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp3-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp3-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp0-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp1-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp1-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp2-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp2-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp3-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp3-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp0-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp1-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp1-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp2-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp2-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp3-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp3-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp0-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp1-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp3-g0] PASSED [ 56%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp3-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp4-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp4-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp0-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp0-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp1-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp3-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp3-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp4-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp4-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp0-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp0-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp1-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp3-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp3-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp0-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp0-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp1-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp1-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp2-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp2-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp3-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp3-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp0-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp0-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp1-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp1-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp2-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp2-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp3-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp3-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp0-g0] PASSED [ 58%] 
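A note on the dot cases in this stretch of the run: unlike add, sub, mul and div, which combine the two operands elementwise, dot contracts the last feature dimension on every edge. A minimal sketch with DGL's built-in message functions; the toy graph and feature names are illustrative stand-ins, not the g0/g1 fixtures of the test file:

    import dgl
    import dgl.function as fn
    import torch

    g = dgl.graph(([0, 1, 2], [1, 2, 0]))  # toy 3-node, 3-edge cycle
    g.ndata["h"] = torch.randn(3, 4)

    # u_dot_v: every edge (u, v) receives the inner product <h_u, h_v>,
    # so the last feature dimension collapses to size 1.
    g.apply_edges(fn.u_dot_v("h", "h", "s"))
    print(g.edata["s"].shape)  # torch.Size([3, 1])
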
tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp2-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp3-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp3-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp4-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp4-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp0-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp2-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp3-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp3-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp4-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp4-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp0-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp4-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp0-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp0-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp1-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp1-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp2-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp4-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp0-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp0-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp1-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp1-g1] PASSED [ 
59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp2-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp4-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp0-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp2-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp2-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp3-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp3-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp4-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp4-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp0-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp2-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp2-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp3-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp3-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp4-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp4-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp0-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp2-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp2-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp4-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp4-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp0-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp0-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp1-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp1-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp2-g0] PASSED [ 61%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp2-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp4-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp4-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp0-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp0-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp1-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp1-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp2-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp2-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp2-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp2-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp3-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp3-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp2-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp2-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp3-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp3-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp2-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp2-g1] PASSED [ 63%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp3-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp3-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp1-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp2-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp3-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp3-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp2-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp3-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp2-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp3-g0] PASSED [ 64%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp3-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp4-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp4-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp0-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp0-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp3-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp4-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp4-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp0-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp0-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp0-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp1-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp1-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp2-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp3-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp4-g0] PASSED [ 66%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp0-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp1-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp1-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp2-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp3-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp4-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp4-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp0-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp4-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp4-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp0-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp0-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp1-g0] PASSED [ 68%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp1-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp2-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp2-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp0-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp1-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp1-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp2-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp2-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp2-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp3-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp3-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp4-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp4-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp0-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp2-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp3-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp3-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp4-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp4-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp0-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp2-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp3-g0] PASSED [ 70%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp4-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp0-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp0-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp1-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp1-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp2-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp2-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp3-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp4-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp0-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp0-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp1-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp1-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp2-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp2-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp3-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp4-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp0-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp2-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp2-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp3-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp3-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp4-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp4-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp0-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp2-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp2-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp3-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp3-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp4-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp4-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp0-g0] PASSED [ 71%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp2-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp2-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp4-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp4-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp0-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp0-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp1-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp1-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp2-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp2-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp4-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp4-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp0-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp0-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp1-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp1-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp2-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp2-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp4-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp1-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp2-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp2-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp3-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp3-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp4-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp1-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp2-g0] PASSED [ 73%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp2-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp3-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp3-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp4-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp1-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp2-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp3-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp3-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp4-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp4-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp0-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp0-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp1-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp1-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp2-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp3-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp3-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp4-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp4-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp0-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp0-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp1-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp1-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp2-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp3-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp3-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp4-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp1-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp2-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp2-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp3-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp3-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp4-g0] PASSED [ 75%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp1-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp2-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp2-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp3-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp3-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp4-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp1-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp3-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp4-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp4-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp0-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp0-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp1-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp1-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp3-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp4-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp4-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp0-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp0-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp1-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp1-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp1-g0] PASSED [ 77%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp1-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp2-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp2-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp3-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp1-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp1-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp2-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp2-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp3-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp1-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp2-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp3-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp3-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp4-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp4-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp0-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp0-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp1-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp2-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp3-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp3-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp4-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp4-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp0-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp0-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp1-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp2-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp3-g0] PASSED [ 79%] 
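Where the thousands of bracketed IDs above come from: pytest expands stacked parametrize decorators into the full Cartesian product of their values and joins the chosen values into the test ID, with the decorator closest to the function contributing the leftmost ID component. A self-contained sketch that reproduces the ID scheme; the parameter values are stand-ins, not the actual dtypes, shapes and graphs defined in tests/compute/test_sparse.py:

    import pytest

    @pytest.mark.parametrize("g", ["g0", "g1"])
    @pytest.mark.parametrize("shp", ["shp0", "shp1", "shp2", "shp3", "shp4"])
    @pytest.mark.parametrize("rhs_target", ["u", "v", "e"])
    @pytest.mark.parametrize("lhs_target", ["u", "v", "e"])
    @pytest.mark.parametrize(
        "op", ["add", "sub", "mul", "div", "dot", "copy_lhs", "copy_rhs"]
    )
    @pytest.mark.parametrize("idtype", ["idtype0", "idtype1"])
    def test_sddmm(idtype, op, lhs_target, rhs_target, shp, g):
        # Generates IDs such as test_sddmm[idtype0-mul-v-u-shp0-g1];
        # this stand-in stack alone expands to 2*7*3*3*5*2 = 1260 cases.
        assert True
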
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp3-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp4-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp4-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp0-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp0-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp1-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp1-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp2-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp2-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp3-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp3-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp4-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp4-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp0-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp0-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp1-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp1-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp2-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp2-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp3-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp3-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp4-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp4-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp0-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp2-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp3-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp3-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp4-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp4-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp0-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp2-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp3-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp3-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp4-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp4-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp0-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp2-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp3-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp4-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp4-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp0-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp0-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp1-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp1-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp2-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp2-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp3-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp1-g0]

Running on dgl-manual-large-cpu in /root/jenkins/workspace/dgl_PR-4648 [Pipeline] { [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags Cloning repository https://github.com/dmlc/dgl.git > git init /root/jenkins/workspace/dgl_PR-4648 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 Cleaning workspace Fetching without tags > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)

Running on dgl-manual-large-cpu in /root/jenkins/workspace/dgl_PR-4648@2 [Pipeline] { [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags Commit message: "fix for pytorch < 1.12" Cleaning workspace > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 [Pipeline] withEnv [Pipeline] { Cloning repository https://github.com/dmlc/dgl.git > git init /root/jenkins/workspace/dgl_PR-4648@2 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh + docker pull dgllib/dgl-ci-cpu:v220816 Cleaning workspace Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10

v220816: Pulling from dgllib/dgl-ci-cpu Digest: sha256:64b385c33b44dc57cb96ff264a84d8dfb8ced0caa9b30fbc4cec6d5ee511b099 Status: Image is up to date for dgllib/dgl-ci-cpu:v220816 docker.io/dgllib/dgl-ci-cpu:v220816 [Pipeline] } [Pipeline] // withEnv [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh + docker inspect -f . dgllib/dgl-ci-cpu:v220816 . [Pipeline] } [Pipeline] // withEnv [Pipeline] withDockerContainer dgl-manual-large-cpu does not seem to be running inside a container $ docker run -t -d -u 0:0 -w /root/jenkins/workspace/dgl_PR-4648 -v /root/jenkins/workspace/dgl_PR-4648:/root/jenkins/workspace/dgl_PR-4648:rw,z -v /root/jenkins/workspace/dgl_PR-4648@tmp:/root/jenkins/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-cpu:v220816 cat > git clean -fdx # timeout=10 $ docker top 280b39495ad3fad21dbb9762a178907987f011c3e8f474eb1caee524b0ff882c -eo pid,comm [Pipeline] { [Pipeline] withEnv [Pipeline] { [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh [Pipeline] sh + docker pull dgllib/dgl-ci-cpu:v220816 + rm -rf CMakeLists.txt
CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@7c2abf69; decorates RemoteLauncher[hudson.remoting.Channel@1d9b9638:dgl-manual-large-cpu] will be ignored (a typical symptom is the Git executable not being run inside a designated container) Fetching changes from the remote Git repository Cleaning workspace PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp1-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp2-g0] Fetching without tags PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp2-g1] Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp3-g1] v220816: Pulling from dgllib/dgl-ci-cpu Digest: sha256:64b385c33b44dc57cb96ff264a84d8dfb8ced0caa9b30fbc4cec6d5ee511b099 Status: Image is up to date for dgllib/dgl-ci-cpu:v220816 docker.io/dgllib/dgl-ci-cpu:v220816 Commit message: "fix for pytorch < 1.12" Cleaning workspace [Pipeline] } [Pipeline] // withEnv [Pipeline] isUnix PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp4-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp4-g1] [Pipeline] withEnv [Pipeline] { > git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648/.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 [Pipeline] sh PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp0-g0] + docker inspect -f . dgllib/dgl-ci-cpu:v220816 . 
[Pipeline] } [Pipeline] // withEnv [Pipeline] withDockerContainer dgl-manual-large-cpu does not seem to be running inside a container $ docker run -t -d -u 0:0 -w /root/jenkins/workspace/dgl_PR-4648@2 -v /root/jenkins/workspace/dgl_PR-4648@2:/root/jenkins/workspace/dgl_PR-4648@2:rw,z -v /root/jenkins/workspace/dgl_PR-4648@2@tmp:/root/jenkins/workspace/dgl_PR-4648@2@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-cpu:v220816 cat PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp1-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp1-g1] $ docker top 7ac09922888ca0e0d8459513540702ad50c9384fbb02f538e426a164f5d98757 -eo pid,comm [Pipeline] { PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp2-g1] [Pipeline] sh [Pipeline] stage [Pipeline] { (Tensorflow CPU Unit test) [Pipeline] sh PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp3-g0] + git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS'... 
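Editor's note: the workers above each clone dmlc/dgl, check out the merge revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea, and initialize the third_party submodules just listed. A minimal Python sketch of the same steps for reproducing the checkout outside Jenkins, assuming git is on PATH (the run() helper is illustrative, not part of the pipeline):

    # Minimal sketch: replays the checkout steps printed in the log above.
    # Assumes `git` is installed; the run() helper is illustrative only.
    import subprocess

    def run(*cmd):
        print('+', ' '.join(cmd))          # echo like the Jenkins `sh` step
        subprocess.run(cmd, check=True)

    run('git', 'clone', 'https://github.com/dmlc/dgl.git')
    run('git', '-C', 'dgl', 'checkout', '37ed78035f406940ed88a4b60cadd2b7cfde5fea')
    run('git', '-C', 'dgl', 'submodule', 'update', '--recursive', '--init')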
+ rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@101e882; decorates RemoteLauncher[hudson.remoting.Channel@1d9b9638:dgl-manual-large-cpu] will be ignored (a typical symptom is the Git executable not being run inside a designated container) Fetching changes from the remote Git repository Cleaning workspace PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp3-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp4-g0] Fetching without tags PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp4-g1] Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp0-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp1-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp1-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp2-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp3-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp3-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp4-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp4-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp0-g0] Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dlpack'... 
PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp1-g0] > git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648@2/.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp1-g1] [Pipeline] sh PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp2-g1] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'... + git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/METIS'... 
PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp3-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp3-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp4-g0] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/googletest'... PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp4-g1] Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/dlpack'... PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp0-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp0-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp1-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp1-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp2-g0] Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/dmlc-core'... PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp2-g1] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/libxsmm'... PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp3-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp3-g1] Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/googletest'... PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp4-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp4-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp0-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp0-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp1-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp1-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp2-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp2-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp3-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp3-g1] Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/libxsmm'... 
PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp4-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp2-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp2-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp3-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp3-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp4-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp2-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp2-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp3-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp3-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp4-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp2-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp2-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp3-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp3-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp4-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp4-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp0-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp0-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp1-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp1-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp2-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp2-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp3-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp3-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp4-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp4-g1] PASSED [ 85%] 
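Editor's note: each test_sddmm id above encodes [index dtype]-[op]-[lhs target]-[rhs target]-[feature shape]-[graph]: a binary op (add, sub, mul, div, dot, copy_lhs, copy_rhs) combines features drawn from source nodes (u), destination nodes (v), or edges (e) into one result per edge. A minimal sketch of the operation under test, assuming DGL's public dgl.ops.gsddmm API (argument names may differ slightly between releases):

    # Minimal sketch of generalized SDDMM, the kernel test_sddmm exercises.
    # Assumes dgl.ops.gsddmm; targets 'u'/'v'/'e' match the test ids.
    import dgl
    import torch

    g = dgl.rand_graph(30, 100)    # 30 nodes, 100 random edges
    x = torch.rand(30, 5)          # per-node operand for target 'v'
    y = torch.rand(30, 5)          # per-node operand for target 'v'

    # Mirrors an id like idtype1-div-v-v-...: 'div' on two 'v' operands.
    e = dgl.ops.gsddmm(g, 'div', x, y, lhs_target='v', rhs_target='v')
    assert e.shape == (100, 5)     # one result row per edge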
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp0-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp0-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp1-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp1-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp2-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp2-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp3-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp3-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp0-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp1-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp2-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp2-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp3-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp3-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp0-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp1-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp2-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp2-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp3-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp3-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp0-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp1-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp4-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp4-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp0-g0] PASSED [ 87%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp0-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp1-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp1-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp4-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp4-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp0-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp0-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp1-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp1-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp1-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp2-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp2-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp3-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp3-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp2-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp2-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp3-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp3-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp4-g0] Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/nccl'... 
PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g1] Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/phmap'... PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g0] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nccl'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/phmap'... PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g0] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'... PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g1] Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe'... PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp3-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g0] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust'... PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g1] Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/thrust'... PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g1] Running on dgl-manual-large-cpu in /root/jenkins/workspace/dgl_PR-4648@3 [Pipeline] { [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g1] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm'... 
Cloning repository https://github.com/dmlc/dgl.git > git init /root/jenkins/workspace/dgl_PR-4648@3 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp0-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp0-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g1] Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tvm'... Cleaning workspace Fetching without tags PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g1] Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. 
Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g0] Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g1] Commit message: "fix for pytorch < 1.12" Cleaning workspace PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g0] > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 [Pipeline] withEnv [Pipeline] { [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g1] + docker pull dgllib/dgl-ci-cpu:v220816 PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g0] v220816: Pulling from dgllib/dgl-ci-cpu Digest: sha256:64b385c33b44dc57cb96ff264a84d8dfb8ced0caa9b30fbc4cec6d5ee511b099 Status: Image is up to date for dgllib/dgl-ci-cpu:v220816 docker.io/dgllib/dgl-ci-cpu:v220816 PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g0] [Pipeline] } [Pipeline] // withEnv [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g1] + docker inspect -f . dgllib/dgl-ci-cpu:v220816 . 
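Editor's note: the unit-test stages execute inside the dgllib/dgl-ci-cpu:v220816 image pulled and inspected above; the long runs of -e ******** in the docker run lines are environment variables masked by Jenkins. A sketch of entering the same image locally, assuming a Docker daemon and that the tag is still published (the --rm -it ... bash invocation is generic, not the pipeline's own command line):

    # Sketch: pull and enter the CI image these stages run in.
    # Assumes a local Docker daemon; flags here are generic, not Jenkins's.
    import subprocess

    subprocess.run(['docker', 'pull', 'dgllib/dgl-ci-cpu:v220816'], check=True)
    subprocess.run(['docker', 'run', '--rm', '-it',
                    'dgllib/dgl-ci-cpu:v220816', 'bash'], check=True)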
[Pipeline] } [Pipeline] // withEnv [Pipeline] withDockerContainer PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g1] dgl-manual-large-cpu does not seem to be running inside a container $ docker run -t -d -u 0:0 --shm-size=4gb -w /root/jenkins/workspace/dgl_PR-4648@3 -v /root/jenkins/workspace/dgl_PR-4648@3:/root/jenkins/workspace/dgl_PR-4648@3:rw,z -v /root/jenkins/workspace/dgl_PR-4648@3@tmp:/root/jenkins/workspace/dgl_PR-4648@3@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-cpu:v220816 cat PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g1] $ docker top af5aa5eeb7433ab669abbd955ede40baf0b228908472383bbed72d33561f7092 -eo pid,comm PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g0] [Pipeline] { [Pipeline] stage [Pipeline] { (Torch CPU Unit test) [Pipeline] sh PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp0-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp0-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp1-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp1-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp2-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp2-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp3-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp3-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp4-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp4-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-src-g0] + rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-dst-g0] [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@78894af6; decorates RemoteLauncher[hudson.remoting.Channel@1d9b9638:dgl-manual-large-cpu] will be ignored (a typical symptom is the Git executable not being run inside a designated container) Fetching changes from the remote Git repository Cleaning workspace PASSED [ 91%] 
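Editor's note: the test_edge_softmax cases that begin here normalize a per-edge score with softmax over the edges incident to each node; the src/dst variants in the ids pick which endpoint the normalization groups by. A minimal sketch, assuming the dgl.ops.edge_softmax API and its norm_by keyword:

    # Minimal sketch of edge_softmax: softmax per-edge scores over each
    # node's incident edges. Assumes dgl.ops.edge_softmax(graph, logits,
    # norm_by=...) as in recent DGL releases.
    import dgl
    import torch

    g = dgl.rand_graph(30, 100)
    logits = torch.rand(100, 1)    # one raw score per edge

    # norm_by='dst' groups by incoming edges; the 'src' cases group by
    # outgoing edges instead.
    a = dgl.ops.edge_softmax(g, logits, norm_by='dst')
    assert a.shape == logits.shape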
tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-src-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-dst-g0] Fetching without tags PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-src-g0] Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-dst-g0] Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-src-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-dst-g0] > git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648@3/.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-src-g0] Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/xbyak'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/METIS/GKlib'... PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-dst-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-src-g0] [Pipeline] sh PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-dst-g0] Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[sum] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[max] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/xbyak'... 
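Editor's note: test_segment_reduce[sum|max|min|mean] covers segment-wise reduction: a flat value tensor is split into variable-length segments and each segment is reduced on its own. A minimal sketch, assuming dgl.ops.segment_reduce(seglen, value, reducer):

    # Minimal sketch of segment_reduce: one reduced row per segment.
    # Assumes dgl.ops.segment_reduce(seglen, value, reducer=...).
    import dgl
    import torch

    seglen = torch.tensor([2, 3, 0, 1])    # segment lengths (zero allowed)
    value = torch.rand(6, 4)               # 2 + 3 + 0 + 1 = 6 rows total

    out = dgl.ops.segment_reduce(seglen, value, reducer='sum')
    assert out.shape == (4, 4)             # one row per segment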
Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' + git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/METIS'... PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[min] Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'... PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[mean] Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/dlpack'... 
PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-idtype1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-1] SKIPPED [ 94%] 
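Editor's note: every test_segment_mm case is SKIPPED in this run. The op it targets is a per-segment dense matmul: rows of one matrix are split into segments and each segment is multiplied by its own weight matrix. A minimal sketch, assuming dgl.ops.segment_mm(a, b, seglen_a):

    # Minimal sketch of segment_mm: a[segment i] @ b[i] for each segment.
    # Assumes dgl.ops.segment_mm(a, b, seglen_a).
    import dgl
    import torch

    seglen_a = torch.tensor([10, 20, 15])    # rows of `a` per segment
    a = torch.rand(45, 8)                    # 10 + 20 + 15 = 45 rows
    b = torch.rand(3, 8, 6)                  # one 8x6 weight per segment

    c = dgl.ops.segment_mm(a, b, seglen_a)
    assert c.shape == (45, 6)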
tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_use_libxsmm_switch SKIPPED (Only ...) [ 94%] tests/compute/test_specialization.py::test_v2v_update_all[idtype0] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_update_all[idtype1] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[idtype0] Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[idtype1] PASSED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[idtype0] PASSED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[idtype1] Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/dmlc-core'... 
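Editor's note: test_gather_mm_idx_b (also SKIPPED here, like test_use_libxsmm_switch) targets the gather-then-matmul kernel: each input row selects its weight matrix through an index tensor, which is what the idx_b in the test name refers to. A minimal sketch, assuming dgl.ops.gather_mm accepts an idx_b keyword:

    # Minimal sketch of gather_mm with idx_b: row i of `a` is multiplied
    # by b[idx_b[i]]. Assumes dgl.ops.gather_mm(a, b, idx_b=...).
    import dgl
    import torch

    a = torch.rand(50, 8)                    # 50 input rows
    b = torch.rand(4, 8, 6)                  # 4 candidate weight matrices
    idx_b = torch.randint(0, 4, (50,))       # weight index for each row

    c = dgl.ops.gather_mm(a, b, idx_b=idx_b)
    assert c.shape == (50, 6)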
PASSED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype0] PASSED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype1] PASSED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[idtype0] PASSED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[idtype1] Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' PASSED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[idtype0] PASSED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_edge_subgraph PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph1[idtype0] Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe/third_party/googletest'... PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph1[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_in_subgraph[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_in_subgraph[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_message_passing PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype0] Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/googletest'... 
PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_subframes[/cpu:0-parent_idx_device0] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[/cpu:0-parent_idx_device1] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[/cpu:0-parent_idx_device2] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[/cpu:0-parent_idx_device3] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[/gpu:0-parent_idx_device0] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[/gpu:0-parent_idx_device1] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[/gpu:0-parent_idx_device2] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[/gpu:0-parent_idx_device3] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype0-/cpu:0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype0-/gpu:0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype1-/cpu:0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype1-/gpu:0] SKIPPED [ 96%] tests/compute/test_transform.py::test_line_graph1 PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[idtype0] PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[idtype1] PASSED [ 96%] tests/compute/test_transform.py::test_no_backtracking PASSED [ 96%] tests/compute/test_transform.py::test_reverse[idtype0] PASSED [ 96%] tests/compute/test_transform.py::test_reverse[idtype1] PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[idtype0] PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[idtype1] PASSED [ 96%] tests/compute/test_transform.py::test_to_bidirected SKIPPED (GPU not...) [ 96%] tests/compute/test_transform.py::test_add_reverse_edges PASSED [ 96%] tests/compute/test_transform.py::test_simple_graph SKIPPED (GPU not ...) [ 96%] tests/compute/test_transform.py::test_khop_graph SKIPPED (GPU not im...) [ 96%] tests/compute/test_transform.py::test_khop_adj SKIPPED (GPU not impl...) [ 96%] tests/compute/test_transform.py::test_laplacian_lambda_max SKIPPED (...) [ 97%] tests/compute/test_transform.py::test_partition_with_halo SKIPPED (G...) [ 97%] tests/compute/test_transform.py::test_metis_partition[idtype0] SKIPPED [ 97%] tests/compute/test_transform.py::test_metis_partition[idtype1] SKIPPED [ 97%] tests/compute/test_transform.py::test_reorder_nodes SKIPPED (It does...) [ 97%] tests/compute/test_transform.py::test_compact[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_compact[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_to_simple[idtype0] SKIPPED (GP...) [ 97%] tests/compute/test_transform.py::test_to_simple[idtype1] SKIPPED (GP...) 
[ 97%] tests/compute/test_transform.py::test_to_block[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_to_block[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_edges[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_remove_edges[idtype1] PASSED [ 97%]
Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c'
Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181'
Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628'
tests/compute/test_transform.py::test_add_edges[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_add_edges[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_nodes[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_remove_nodes[idtype1] PASSED [ 97%]
Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f'
Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361'
Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest'
Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop'
Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv'
Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'...
tests/compute/test_transform.py::test_add_selfloop[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_add_selfloop[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_graph[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_norm_by_dst[idtype0] SKIPPED (...) [ 98%] tests/compute/test_transform.py::test_norm_by_dst[idtype1] SKIPPED (...)
[ 98%] tests/compute/test_transform.py::test_module_add_self_loop[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_self_loop[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_to_simple[idtype0] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_to_simple[idtype1] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_line_graph[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_line_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_khop_graph[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_khop_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_gcnnorm[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_gcnnorm[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_ppr[idtype0] SKIPPED (O...) [ 98%] tests/compute/test_transform.py::test_module_ppr[idtype1] SKIPPED (O...) [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[idtype0] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[idtype1] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_gdc[idtype0] SKIPPED (O...) [ 99%] tests/compute/test_transform.py::test_module_gdc[idtype1] SKIPPED (O...) [ 99%] tests/compute/test_transform.py::test_module_node_shuffle[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_node_shuffle[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_drop_node[idtype0] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_node[idtype1] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_edge[idtype0] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_edge[idtype1] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_add_edge[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_add_edge[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_sign[g0] SKIPPED (Only ...) [ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[idtype0] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[idtype1] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[idtype0] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[idtype1] SKIPPED [ 99%] tests/compute/test_traversal.py::test_bfs[idtype0] Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/libxsmm'... 
PASSED [ 99%] tests/compute/test_traversal.py::test_bfs[idtype1] PASSED [ 99%]
Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe/third_party/libnop'...
tests/compute/test_traversal.py::test_topological_nodes[idtype0] PASSED [ 99%] tests/compute/test_traversal.py::test_topological_nodes[idtype1] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[idtype0] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[idtype1] PASSED [100%]
=============================== warnings summary ===============================
python/dgl/backend/backend.py:1741
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/backend.py:1741: DeprecationWarning: invalid escape sequence \P
    """
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/iterator_ops.py:546
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/iterator_ops.py:546: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
    class IteratorBase(collections.Iterator, trackable.Trackable,
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py:106
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py:106: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
    class DatasetV2(collections.Iterable, tracking_base.Trackable,
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/autograph/utils/testing.py:21
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/autograph/utils/testing.py:21: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses
    import imp
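The escape-sequence and collections/imp warnings above all have one-line fixes on the caller side. A minimal sketch of those fixes (the function name below is hypothetical, not taken from the DGL sources):

```python
# Sketch of the fixes these DeprecationWarnings ask for.
from collections.abc import Iterable, Iterator  # not: from collections import ...
import importlib  # replaces the deprecated imp module


def pad_packed_tensor(x):  # hypothetical name, for illustration only
    r"""Raw docstring: \P stays literal instead of an invalid escape."""
    return x


print(isinstance([], Iterable), isinstance(iter(()), Iterator))
print(importlib.import_module("math").sqrt(4.0))
```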
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _nlv = LooseVersion(_np_version)
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p16 = _nlv < LooseVersion("1.16")
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p17 = _nlv < LooseVersion("1.17")
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p18 = _nlv < LooseVersion("1.18")
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p19 = _nlv < LooseVersion("1.19")
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p20 = _nlv < LooseVersion("1.20")
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    other = LooseVersion(other)
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(_np_version) >= LooseVersion("1.17.0"):
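Every distutils warning in this run points at the same migration. A minimal sketch of the replacement, assuming the `packaging` package is installed (the variable names below paraphrase pandas' internals):

```python
# LooseVersion (distutils) -> packaging.version.Version migration sketch.
from packaging.version import Version  # pip install packaging

import numpy as np

nlv = Version(np.__version__)                 # was: _nlv = LooseVersion(_np_version)
np_version_under1p17 = nlv < Version("1.17")  # was: _nlv < LooseVersion("1.17")
print(np_version_under1p17)
```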
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:23
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:23: DeprecationWarning: NEAREST is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.NEAREST or Dither.NONE instead.
    'nearest': pil_image.NEAREST,
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:24
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:24: DeprecationWarning: BILINEAR is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BILINEAR instead.
    'bilinear': pil_image.BILINEAR,
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:25
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:25: DeprecationWarning: BICUBIC is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BICUBIC instead.
    'bicubic': pil_image.BICUBIC,
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:28
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:28: DeprecationWarning: HAMMING is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.HAMMING instead.
    if hasattr(pil_image, 'HAMMING'):
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:29
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:29: DeprecationWarning: HAMMING is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.HAMMING instead.
    _PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:30
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:30: DeprecationWarning: BOX is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BOX instead.
    if hasattr(pil_image, 'BOX'):
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:31
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:31: DeprecationWarning: BOX is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BOX instead.
    _PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:33
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:33: DeprecationWarning: LANCZOS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead.
    if hasattr(pil_image, 'LANCZOS'):
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:34
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:34: DeprecationWarning: LANCZOS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead.
    _PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
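The Pillow warnings above come from module-level constants that moved into an enum in Pillow 9.1. A compatibility sketch (not taken from the keras_preprocessing sources):

```python
# Pillow >= 9.1 exposes resampling filters on the Image.Resampling enum;
# the bare constants (Image.NEAREST, Image.BILINEAR, ...) now warn.
from PIL import Image

Resampling = getattr(Image, "Resampling", Image)  # enum on >= 9.1, module before

img = Image.new("RGB", (64, 64))
thumb = img.resize((32, 32), resample=Resampling.BILINEAR)
print(thumb.size)
```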
Please use DGLGraph.add_edges") tests/compute/test_basics.py::test_update_all_0deg[idtype0] tests/compute/test_basics.py::test_update_all_0deg[idtype1] tests/compute/test_basics.py::test_pull_0deg[idtype0] tests/compute/test_basics.py::test_pull_0deg[idtype1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/core.py:79: DGLWarning: The input graph for the user-defined edge function does not contain valid edges dgl_warning('The input graph for the user-defined edge function ' \ tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype0] tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype1] tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query tests/compute/test_transform.py::test_no_backtracking tests/compute/test_transform.py::test_reverse[idtype0] tests/compute/test_transform.py::test_reverse[idtype1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2978: DGLWarning: DGLGraph.edge_id is deprecated. Please use DGLGraph.edge_ids. dgl_warning("DGLGraph.edge_id is deprecated. Please use DGLGraph.edge_ids.") tests/compute/test_batched_heterograph.py::test_features[idtype0] tests/compute/test_batched_heterograph.py::test_features[idtype1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/batch.py:159: DGLWarning: Arguments edge_attrs has been deprecated. Please use edata instead. dgl_warning('Arguments edge_attrs has been deprecated. Please use' tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype0] tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype1] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype0] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype1] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype0] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype1] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype0] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph_index.py:797: FutureWarning: Adjacency matrix by default currently returns edge IDs. As a result there is one 0 entry which is not eliminated. In the next release it will return 1s by default, and 0 will be eliminated otherwise. FutureWarning) tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query tests/compute/test_heterograph.py::test_query[idtype0] tests/compute/test_heterograph.py::test_query[idtype1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2753: DGLWarning: DGLGraph.has_node is deprecated. Please use DGLGraph.has_nodes dgl_warning("DGLGraph.has_node is deprecated. Please use DGLGraph.has_nodes") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2687: DGLWarning: DGLGraph.__contains__ is deprecated. Please directly call has_nodes. dgl_warning('DGLGraph.__contains__ is deprecated.' tests/compute/test_graph.py::test_query tests/compute/test_sampling.py::test_non_uniform_random_walk[False] tests/compute/test_sampling.py::test_uniform_random_walk[True] tests/compute/test_sampling.py::test_uniform_random_walk[False] tests/compute/test_transform.py::test_no_backtracking /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2851: DGLWarning: DGLGraph.has_edge_between is deprecated. Please use DGLGraph.has_edges_between dgl_warning("DGLGraph.has_edge_between is deprecated. 
" tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:3432: DGLWarning: DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees dgl_warning("DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:3516: DGLWarning: DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees dgl_warning("DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees") tests/compute/test_graph.py::test_query /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly', 'sort_csr'] are deprecated in v0.5, and can be safely removed in all cases. ' removed in all cases.'.format(list(deprecate_kwargs.keys()))) tests/compute/test_heterograph.py: 20 warnings /root/jenkins/workspace/dgl_PR-4648/tests/compute/test_heterograph.py:1128: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead assert np.asscalar(F.asnumpy(src_i)) == nid[src[i]] tests/compute/test_heterograph.py: 20 warnings /root/jenkins/workspace/dgl_PR-4648/tests/compute/test_heterograph.py:1129: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead assert np.asscalar(F.asnumpy(dst_i)) == nid[dst[i]] tests/compute/test_heterograph.py::test_invertible_conversion[idtype0] tests/compute/test_heterograph.py::test_invertible_conversion[idtype1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2635: DGLWarning: DGLGraph.is_readonly is deprecated in v0.5. DGLGraph now always supports mutable operations like add_nodes and add_edges. dgl_warning('DGLGraph.is_readonly is deprecated in v0.5.\n' tests/compute/test_partition.py::test_get_node_partition_from_book[idtype0] tests/compute/test_partition.py::test_get_node_partition_from_book[idtype1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") tests/compute/test_sampler.py::test_create_full tests/compute/test_sampler.py::test_1neighbor_sampler_all tests/compute/test_sampler.py::test_1neighbor_sampler tests/compute/test_sampler.py::test_prefetch_neighbor_sampler tests/compute/test_sampler.py::test_10neighbor_sampler_all tests/compute/test_sampler.py::test_10neighbor_sampler tests/compute/test_sampler.py::test_setseed /root/jenkins/workspace/dgl_PR-4648/python/dgl/contrib/sampling/sampler.py:317: DGLWarning: dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5. Please read our guide for how to use the new sampling APIs. dgl_warning('dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5.' tests/compute/test_sampler.py::test_create_full tests/compute/test_sampler.py::test_1neighbor_sampler_all tests/compute/test_sampler.py::test_1neighbor_sampler tests/compute/test_sampler.py::test_prefetch_neighbor_sampler tests/compute/test_sampler.py::test_10neighbor_sampler_all tests/compute/test_sampler.py::test_10neighbor_sampler tests/compute/test_sampler.py::test_layer_sampler tests/compute/test_sampler.py::test_setseed /root/jenkins/workspace/dgl_PR-4648/python/dgl/_deprecate/nodeflow.py:99: DGLWarning: NodeFlow APIs are deprecated starting from v0.5. 
tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32]
tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/sampling/negative.py:102: ComplexWarning: Casting complex values to real discards the imaginary part
    g._graph, etype_id, num_samples, 3, exclude_self_loops, replace, redundancy)
tests/compute/test_serialize.py::test_load_old_files1
tests/compute/test_serialize.py::test_load_old_files2
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/data/graph_serialize.py:179: DGLWarning: You are loading a graph file saved by old version of dgl. Please consider saving it again with the current format.
    Please consider saving it again with the current format.")
tests/compute/test_transform.py::test_reverse_shared_frames[idtype0]
tests/compute/test_transform.py::test_reverse_shared_frames[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/transforms/functional.py:1267: DGLWarning: share_ndata argument has been renamed to copy_ndata.
    dgl_warning('share_ndata argument has been renamed to copy_ndata.')
tests/compute/test_transform.py::test_reverse_shared_frames[idtype0]
tests/compute/test_transform.py::test_reverse_shared_frames[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/transforms/functional.py:1270: DGLWarning: share_edata argument has been renamed to copy_edata.
    dgl_warning('share_edata argument has been renamed to copy_edata.')
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
-- generated xml file: /root/jenkins/workspace/dgl_PR-4648/pytest_compute.xml --
============================ slowest 100 durations =============================
30.41s call tests/compute/test_kernel.py::test_all_binary_builtins 3.52s call tests/compute/test_heterograph.py::test_forking_pickler 0.97s call tests/compute/test_sampling.py::test_non_uniform_random_walk[False] 0.84s call tests/compute/test_graph.py::test_query 0.67s call tests/compute/test_sampling.py::test_uniform_random_walk[False] 0.59s call tests/compute/test_heterograph.py::test_query[idtype1] 0.53s call tests/compute/test_heterograph.py::test_query[idtype0] 0.49s call tests/compute/test_sampling.py::test_uniform_random_walk[True] 0.43s call tests/compute/test_sampling.py::test_sample_neighbors_outedge 0.33s call tests/compute/test_basics.py::test_batch_setter_getter[idtype0] 0.33s call tests/compute/test_kernel.py::test_copy_edge_reduce 0.31s call tests/compute/test_kernel.py::test_copy_src_reduce 0.27s call tests/compute/test_traversal.py::test_bfs[idtype1] 0.27s call tests/compute/test_traversal.py::test_bfs[idtype0] 0.26s call tests/compute/test_heterograph.py::test_view1[idtype1] 0.24s call tests/compute/test_sampler.py::test_10neighbor_sampler 0.24s call tests/compute/test_sampling.py::test_sample_neighbors_prob 0.24s call tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp1-g0] 0.23s call tests/compute/test_sampler.py::test_prefetch_neighbor_sampler 0.23s call tests/compute/test_sampler.py::test_1neighbor_sampler 0.23s call tests/compute/test_sampling.py::test_sample_neighbors_noprob 0.23s call tests/compute/test_heterograph.py::test_view1[idtype0] 0.22s call tests/compute/test_basics.py::test_issue_1088[idtype0] 0.18s call tests/compute/test_sparse.py::test_segment_reduce[mean] 0.18s call tests/compute/test_sparse.py::test_segment_reduce[min] 0.18s call
tests/compute/test_sparse.py::test_segment_reduce[max] 0.18s call tests/compute/test_sparse.py::test_segment_reduce[sum] 0.18s call tests/compute/test_heterograph.py::test_updates[idtype1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g0] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-src-g0] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-src-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g0] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-src-g0] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-src-g0] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-dst-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g0] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-dst-g0] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-src-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g1] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-dst-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g0] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-dst-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g0] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-dst-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g0] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-src-g0] 0.18s call tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-dst-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g1] 0.18s call 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g1] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g0] 0.18s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g1] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g1] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g1] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g1] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g1] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g1] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g1] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp3-g1] 0.17s call tests/compute/test_heterograph.py::test_updates[idtype0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp3-g1] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g1] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g1] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp0-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp4-g1] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g0] 0.17s call tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp3-g1]
========= 2227 passed, 231 skipped, 216 warnings in 233.36s (0:03:53) ==========
Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe/third_party/libuv'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'...
============================= test session starts ==============================
platform linux -- Python 3.7.0, pytest-7.1.2, pluggy-1.0.0 -- /opt/conda/envs/tensorflow-ci/bin/python3
cachedir: .pytest_cache
rootdir: /root/jenkins/workspace/dgl_PR-4648
collecting ...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe/third_party/pybind11'...
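Given how many deprecations scroll past in the summary above, one option (a sketch, not part of this pipeline) is to promote selected warning categories to errors in the test suite so new regressions fail loudly instead of accumulating:

```python
import pytest

# Promote DeprecationWarning to an error for this test; the same
# "error::DeprecationWarning" filter can be applied suite-wide via the
# filterwarnings setting in pytest.ini.
@pytest.mark.filterwarnings("error::DeprecationWarning")
def test_no_new_deprecations():
    assert 1 + 1 == 2
```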
Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e'
Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2'
Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242'
Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef'
Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang'
Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tensorpipe/third_party/pybind11/tools/clang'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'...
Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5'
Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890'
Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub'
Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/thrust/dependencies/cub'...
Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e'
Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2'
collected 920 items
tests/tensorflow/test_basic.py::test PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv[1] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv[2] PASSED [ 0%]
Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242'
Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef'
Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'...
tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g0-idtype0]
PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g0-idtype1] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g1-idtype0] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g1-idtype1] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g2-idtype0] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g2-idtype1] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g3-idtype0] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g3-idtype1] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g4-idtype0] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g4-idtype1] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g5-idtype0] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g5-idtype1] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g6-idtype0] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g6-idtype1] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g0-idtype0] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g0-idtype1] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g1-idtype0] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g1-idtype1] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g2-idtype0] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g2-idtype1] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g3-idtype0] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g3-idtype1] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g4-idtype0] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g4-idtype1] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g5-idtype0] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g5-idtype1] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g6-idtype0] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g6-idtype1] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g0-idtype0] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g0-idtype1] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g1-idtype0] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g1-idtype1] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g2-idtype0] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g2-idtype1] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g3-idtype0] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g3-idtype1] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g4-idtype0] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g4-idtype1] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g5-idtype0] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g5-idtype1] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g6-idtype0] PASSED [ 4%] 
tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g6-idtype1] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g0-idtype0] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g0-idtype1] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g1-idtype0] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g1-idtype1] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g2-idtype0] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g2-idtype1] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g3-idtype0] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g3-idtype1] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g4-idtype0] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g4-idtype1] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g5-idtype0] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g5-idtype1] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g6-idtype0] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g6-idtype1] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g0-idtype0] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g0-idtype1] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g1-idtype0] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g1-idtype1] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g2-idtype0] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g2-idtype1] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g3-idtype0] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g3-idtype1] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g4-idtype0] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g4-idtype1] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g5-idtype0] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g5-idtype1] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g6-idtype0] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g6-idtype1] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g0-idtype0] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g0-idtype1] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g1-idtype0] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g1-idtype1] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g2-idtype0] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g2-idtype1] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g3-idtype0] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g3-idtype1] Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g4-idtype0] PASSED [ 8%] 
tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g4-idtype1] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g5-idtype0] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g5-idtype1] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g6-idtype0] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g6-idtype1] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g0-idtype0] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g0-idtype1] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g1-idtype0] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g1-idtype1] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g2-idtype0] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g2-idtype1] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g3-idtype0] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g3-idtype1] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g4-idtype0] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g4-idtype1] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g5-idtype0] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g5-idtype1] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g6-idtype0] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g6-idtype1] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g0-idtype0] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g0-idtype1] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g1-idtype0] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g1-idtype1] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g2-idtype0] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g2-idtype1] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g3-idtype0] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g3-idtype1] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g4-idtype0] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g4-idtype1] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g5-idtype0] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g5-idtype1] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g6-idtype0] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g6-idtype1] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g0-idtype0] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g0-idtype1] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g1-idtype0] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g1-idtype1] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g2-idtype0] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g2-idtype1] PASSED [ 13%] 
tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g3-idtype0] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g3-idtype1] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g4-idtype0] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g4-idtype1] PASSED [ 13%]
Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890'
Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'...
tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g5-idtype0] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g5-idtype1] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g6-idtype0] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g6-idtype1] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g0-idtype0] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g0-idtype1] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g1-idtype0] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g1-idtype1] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g2-idtype0] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g2-idtype1] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g3-idtype0] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g3-idtype1] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g4-idtype0] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g4-idtype1] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g5-idtype0] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g5-idtype1] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g6-idtype0] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g6-idtype1] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g0-idtype0] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g0-idtype1] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g1-idtype0] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g1-idtype1] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g2-idtype0] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g2-idtype1] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g3-idtype0] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g3-idtype1] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g4-idtype0] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g4-idtype1] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g5-idtype0] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g5-idtype1] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g6-idtype0] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g6-idtype1] PASSED [ 17%]
tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g0-idtype0] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g0-idtype1] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g1-idtype0] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g1-idtype1] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g2-idtype0] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g2-idtype1] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g3-idtype0] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g3-idtype1] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g4-idtype0] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g4-idtype1] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g5-idtype0] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g5-idtype1] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g6-idtype0] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g6-idtype1] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g0-idtype0] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g0-idtype1] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g1-idtype0] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g1-idtype1] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g2-idtype0] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g2-idtype1] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g3-idtype0] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g3-idtype1] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g4-idtype0] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g4-idtype1] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g5-idtype0] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g5-idtype1] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g6-idtype0] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g6-idtype1] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g0-idtype0] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g0-idtype1] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g1-idtype0] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g1-idtype1] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g2-idtype0] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g2-idtype1] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g3-idtype0] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g3-idtype1] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g4-idtype0] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g4-idtype1] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g5-idtype0] PASSED [ 21%] 
tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g5-idtype1] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g6-idtype0] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g6-idtype1] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g0-idtype0] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g0-idtype1] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g1-idtype0] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g1-idtype1] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g2-idtype0] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g2-idtype1] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g3-idtype0] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g3-idtype1] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g4-idtype0] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g4-idtype1] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g5-idtype0] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g5-idtype1] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g6-idtype0] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g6-idtype1] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g0-idtype0] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g0-idtype1] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g1-idtype0] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g1-idtype1] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g2-idtype0] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g2-idtype1] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g3-idtype0] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g3-idtype1] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g4-idtype0] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g4-idtype1] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g5-idtype0] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g5-idtype1] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g6-idtype0] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g6-idtype1] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g0-idtype0] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g0-idtype1] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g1-idtype0] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g1-idtype1] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g2-idtype0] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g2-idtype1] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g3-idtype0] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g3-idtype1] PASSED [ 25%] 
tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g4-idtype0] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g4-idtype1] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g5-idtype0] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g5-idtype1] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g6-idtype0] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g6-idtype1] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g0-idtype0] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g0-idtype1] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g1-idtype0] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g1-idtype1] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g2-idtype0] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g2-idtype1] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g3-idtype0] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g3-idtype1] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g4-idtype0] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g4-idtype1] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g5-idtype0] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g5-idtype1] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g6-idtype0] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g6-idtype1] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g0-idtype0] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g0-idtype1] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g1-idtype0] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g1-idtype1] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g2-idtype0] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g2-idtype1] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g3-idtype0] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g3-idtype1] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g4-idtype0] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g4-idtype1] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g5-idtype0] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g5-idtype1] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g6-idtype0] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g6-idtype1] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g0-idtype0] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g0-idtype1] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g1-idtype0] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g1-idtype1] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g2-idtype0] PASSED [ 29%] 
tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g2-idtype1] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g3-idtype0] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g3-idtype1] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g4-idtype0] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g4-idtype1] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g5-idtype0] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g5-idtype1] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g6-idtype0] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g6-idtype1] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g0-idtype0] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g0-idtype1] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g1-idtype0] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g1-idtype1] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g2-idtype0] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g2-idtype1] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g3-idtype0] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g3-idtype1] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g4-idtype0] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g4-idtype1] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g5-idtype0] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g5-idtype1] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g6-idtype0] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g6-idtype1] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g0-idtype0] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g0-idtype1] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g1-idtype0] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g1-idtype1] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g2-idtype0] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g2-idtype1] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g3-idtype0] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g3-idtype1] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g4-idtype0] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g4-idtype1] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g5-idtype0] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g5-idtype1] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g6-idtype0] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g6-idtype1] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g0-idtype0] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g0-idtype1] PASSED [ 34%] 
tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g1-idtype0] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g1-idtype1] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g2-idtype0] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g2-idtype1] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g3-idtype0] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g3-idtype1] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g4-idtype0] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g4-idtype1] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g5-idtype0] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g5-idtype1] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g6-idtype0] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g6-idtype1] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g0-idtype0] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g0-idtype1] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g1-idtype0] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g1-idtype1] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g2-idtype0] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g2-idtype1] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g3-idtype0] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g3-idtype1] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g4-idtype0] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g4-idtype1] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g5-idtype0] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g5-idtype1] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g6-idtype0] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g6-idtype1] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g0-idtype0] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g0-idtype1] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g1-idtype0] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g1-idtype1] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g2-idtype0] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g2-idtype1] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g3-idtype0] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g3-idtype1] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g4-idtype0] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g4-idtype1] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g5-idtype0] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g5-idtype1] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g6-idtype0] PASSED [ 38%] 
tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g6-idtype1] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g0-idtype0] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g0-idtype1] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g1-idtype0] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g1-idtype1] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g2-idtype0] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g2-idtype1] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g3-idtype0] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g3-idtype1] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g4-idtype0] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g4-idtype1] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g5-idtype0] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g5-idtype1] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g6-idtype0] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g6-idtype1] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g0-idtype0] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g0-idtype1] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g1-idtype0] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g1-idtype1] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g2-idtype0] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g2-idtype1] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g3-idtype0] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g3-idtype1] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g4-idtype0] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g4-idtype1] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g5-idtype0] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g5-idtype1] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g6-idtype0] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g6-idtype1] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g0-idtype0] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g0-idtype1] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g1-idtype0] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g1-idtype1] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g2-idtype0] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g2-idtype1] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g3-idtype0] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g3-idtype1] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g4-idtype0] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g4-idtype1] PASSED [ 42%] 
tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g5-idtype0] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g5-idtype1] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g6-idtype0] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g6-idtype1] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g0-idtype0] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g0-idtype1] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g1-idtype0] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g1-idtype1] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g2-idtype0] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g2-idtype1] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g3-idtype0] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g3-idtype1] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g4-idtype0] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g4-idtype1] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g5-idtype0] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g5-idtype1] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g6-idtype0] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g6-idtype1] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g0-idtype0] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g0-idtype1] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g1-idtype0] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g1-idtype1] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g2-idtype0] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g2-idtype1] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g3-idtype0] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g3-idtype1] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g4-idtype0] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g4-idtype1] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g5-idtype0] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g5-idtype1] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g6-idtype0] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g6-idtype1] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g0-idtype0] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g0-idtype1] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g1-idtype0] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g1-idtype1] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g2-idtype0] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g2-idtype1] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g3-idtype0] PASSED [ 46%] 
tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g3-idtype1] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g4-idtype0] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g4-idtype1] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g5-idtype0] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g5-idtype1] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g6-idtype0] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g6-idtype1] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g0-idtype0] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g0-idtype1] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g1-idtype0] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g1-idtype1] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g2-idtype0] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g2-idtype1] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g3-idtype0] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g3-idtype1] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g4-idtype0] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g4-idtype1] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g5-idtype0] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g5-idtype1] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g6-idtype0] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g6-idtype1] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-none-g0-idtype0] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-none-g0-idtype1] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-none-g1-idtype0] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-none-g1-idtype1] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-both-g0-idtype0] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-both-g0-idtype1] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-both-g1-idtype0] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-both-g1-idtype1] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-right-g0-idtype0] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-right-g0-idtype1] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-right-g1-idtype0] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-right-g1-idtype1] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-none-g0-idtype0] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-none-g0-idtype1] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-none-g1-idtype0] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-none-g1-idtype1] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-both-g0-idtype0] PASSED [ 50%] 
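Each bracketed ID in the test_graph_conv2 runs above encodes one parametrization; the pattern appears to be [out_dim-weight-bias-norm-graph-idtype], sweeping the norm mode over none/both/right/left, seven graph fixtures (g0-g6), and the two integer ID dtypes, while the test_graph_conv2_bi cases that follow appear to repeat the sweep on bipartite graph pairs. A minimal sketch of the forward pass these cases exercise, with an illustrative graph rather than the suite's fixtures:

import tensorflow as tf
import dgl
from dgl.nn.tensorflow import GraphConv

# Assumes DGL installed with the TensorFlow backend (DGLBACKEND=tensorflow).
# The 4-node cycle below is illustrative; every node has an in-edge, so the
# default zero-in-degree check does not trigger.
g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
feat = tf.random.normal((g.num_nodes(), 5))

for norm in ("none", "both", "right", "left"):  # the norm axis in the IDs above
    conv = GraphConv(5, 2, norm=norm, weight=True, bias=True)
    out = conv(g, feat)  # shape: (4, 2)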
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-both-g0-idtype1] PASSED [ 50%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-both-g1-idtype0] PASSED [ 51%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-both-g1-idtype1] PASSED [ 51%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-right-g0-idtype0] PASSED [ 51%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-right-g0-idtype1] PASSED [ 51%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-right-g1-idtype0] PASSED [ 51%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-right-g1-idtype1] PASSED [ 51%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-none-g0-idtype0] PASSED [ 51%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-none-g0-idtype1] PASSED [ 51%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-none-g1-idtype0] PASSED [ 51%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-none-g1-idtype1]
Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95'
PASSED [ 52%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-both-g0-idtype0] PASSED [ 52%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-both-g0-idtype1] PASSED [ 52%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-both-g1-idtype0] PASSED [ 52%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-both-g1-idtype1] PASSED [ 52%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-right-g0-idtype0] PASSED [ 52%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-right-g0-idtype1] PASSED [ 52%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-right-g1-idtype0] PASSED [ 52%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-right-g1-idtype1] PASSED [ 52%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-none-g0-idtype0] PASSED [ 53%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-none-g0-idtype1] PASSED [ 53%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-none-g1-idtype0] PASSED [ 53%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-none-g1-idtype1] PASSED [ 53%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-both-g0-idtype0] PASSED [ 53%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-both-g0-idtype1] PASSED [ 53%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-both-g1-idtype0] PASSED [ 53%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-both-g1-idtype1] PASSED [ 53%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-right-g0-idtype0] PASSED [ 53%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-right-g0-idtype1] PASSED [ 54%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-right-g1-idtype0] PASSED [ 54%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-right-g1-idtype1] PASSED [ 54%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-none-g0-idtype0] PASSED [ 54%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-none-g0-idtype1] PASSED [ 54%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-none-g1-idtype0] PASSED [ 54%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-none-g1-idtype1] PASSED [ 54%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-both-g0-idtype0] PASSED [ 54%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-both-g0-idtype1]
PASSED [ 54%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-both-g1-idtype0] PASSED [ 55%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-both-g1-idtype1] PASSED [ 55%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-right-g0-idtype0] PASSED [ 55%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-right-g0-idtype1] PASSED [ 55%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-right-g1-idtype0] PASSED [ 55%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-right-g1-idtype1] PASSED [ 55%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-none-g0-idtype0] PASSED [ 55%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-none-g0-idtype1] PASSED [ 55%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-none-g1-idtype0] PASSED [ 55%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-none-g1-idtype1] PASSED [ 55%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-both-g0-idtype0] PASSED [ 56%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-both-g0-idtype1] PASSED [ 56%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-both-g1-idtype0] PASSED [ 56%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-both-g1-idtype1] PASSED [ 56%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-right-g0-idtype0] PASSED [ 56%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-right-g0-idtype1]
Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838'
Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack'
Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core'
Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang'
Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'...
PASSED [ 56%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-right-g1-idtype0] PASSED [ 56%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-right-g1-idtype1] PASSED [ 56%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-none-g0-idtype0] PASSED [ 56%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-none-g0-idtype1] PASSED [ 57%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-none-g1-idtype0] PASSED [ 57%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-none-g1-idtype1] PASSED [ 57%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-both-g0-idtype0] PASSED [ 57%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-both-g0-idtype1] PASSED [ 57%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-both-g1-idtype0] PASSED [ 57%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-both-g1-idtype1] PASSED [ 57%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-right-g0-idtype0] PASSED [ 57%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-right-g0-idtype1] PASSED [ 57%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-right-g1-idtype0] PASSED [ 58%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-right-g1-idtype1] PASSED [ 58%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-none-g0-idtype0] PASSED [ 58%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-none-g0-idtype1] PASSED [ 58%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-none-g1-idtype0] PASSED [ 58%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-none-g1-idtype1] PASSED [ 58%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-both-g0-idtype0] PASSED [ 58%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-both-g0-idtype1] PASSED [ 58%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-both-g1-idtype0] PASSED [ 58%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-both-g1-idtype1] PASSED [ 59%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-right-g0-idtype0] PASSED [ 59%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-right-g0-idtype1] PASSED [ 59%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-right-g1-idtype0] PASSED [ 59%]
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-right-g1-idtype1] PASSED [ 59%]
tests/tensorflow/test_nn.py::test_simple_pool PASSED [ 59%]
tests/tensorflow/test_nn.py::test_glob_att_pool PASSED [ 59%]
tests/tensorflow/test_nn.py::test_rgcn[1] PASSED [ 59%]
tests/tensorflow/test_nn.py::test_rgcn[2] PASSED [ 59%]
tests/tensorflow/test_nn.py::test_rgcn[8] PASSED [ 60%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g0-idtype0]
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'...
PASSED [ 60%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g0-idtype1] PASSED [ 60%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g1-idtype0] PASSED [ 60%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g1-idtype1] PASSED [ 60%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g2-idtype0] PASSED [ 60%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g2-idtype1] PASSED [ 60%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g3-idtype0] PASSED [ 60%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g3-idtype1] PASSED [ 60%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g4-idtype0] PASSED [ 60%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g4-idtype1] PASSED [ 61%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g5-idtype0] PASSED [ 61%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g5-idtype1] PASSED [ 61%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g6-idtype0] PASSED [ 61%]
tests/tensorflow/test_nn.py::test_gat_conv[1-1-g6-idtype1] PASSED [ 61%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g0-idtype0] PASSED [ 61%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g0-idtype1] PASSED [ 61%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g1-idtype0] PASSED [ 61%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g1-idtype1] PASSED [ 61%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g2-idtype0] PASSED [ 62%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g2-idtype1] PASSED [ 62%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g3-idtype0]
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'...
PASSED [ 62%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g3-idtype1] PASSED [ 62%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g4-idtype0] PASSED [ 62%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g4-idtype1] PASSED [ 62%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g5-idtype0] PASSED [ 62%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g5-idtype1] PASSED [ 62%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g6-idtype0] PASSED [ 62%]
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g6-idtype1] PASSED [ 63%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g0-idtype0] PASSED [ 63%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g0-idtype1] PASSED [ 63%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g1-idtype0] PASSED [ 63%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g1-idtype1] PASSED [ 63%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g2-idtype0] PASSED [ 63%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g2-idtype1] PASSED [ 63%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g3-idtype0] PASSED [ 63%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g3-idtype1] PASSED [ 63%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g4-idtype0] PASSED [ 64%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g4-idtype1] PASSED [ 64%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g5-idtype0] PASSED [ 64%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g5-idtype1] PASSED [ 64%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g6-idtype0] PASSED [ 64%]
tests/tensorflow/test_nn.py::test_gat_conv[4-1-g6-idtype1] PASSED [ 64%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g0-idtype0] PASSED [ 64%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g0-idtype1] PASSED [ 64%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g1-idtype0] PASSED [ 64%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g1-idtype1] PASSED [ 65%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g2-idtype0] PASSED [ 65%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g2-idtype1] PASSED [ 65%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g3-idtype0] PASSED [ 65%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g3-idtype1] PASSED [ 65%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g4-idtype0] PASSED [ 65%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g4-idtype1]
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'...
PASSED [ 65%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g5-idtype0] PASSED [ 65%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g5-idtype1] PASSED [ 65%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g6-idtype0] PASSED [ 65%]
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g6-idtype1] PASSED [ 66%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g0-idtype0] PASSED [ 66%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g0-idtype1] PASSED [ 66%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g1-idtype0] PASSED [ 66%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g1-idtype1] PASSED [ 66%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g0-idtype0] PASSED [ 66%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g0-idtype1] PASSED [ 66%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g1-idtype0] PASSED [ 66%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g1-idtype1] PASSED [ 66%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g0-idtype0] PASSED [ 67%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g0-idtype1] PASSED [ 67%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g1-idtype0] PASSED [ 67%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g1-idtype1] PASSED [ 67%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[4-2-g0-idtype0] PASSED [ 67%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[4-2-g0-idtype1] PASSED [ 67%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[4-2-g1-idtype0] PASSED [ 67%]
tests/tensorflow/test_nn.py::test_gat_conv_bi[4-2-g1-idtype1] PASSED [ 67%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g0-idtype0] PASSED [ 67%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g0-idtype1] PASSED [ 68%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g1-idtype0] PASSED [ 68%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g1-idtype1] PASSED [ 68%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g2-idtype0] PASSED [ 68%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g2-idtype1] PASSED [ 68%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g3-idtype0] PASSED [ 68%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g3-idtype1] PASSED [ 68%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g4-idtype0] PASSED [ 68%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g4-idtype1] PASSED [ 68%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g5-idtype0] PASSED [ 69%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g5-idtype1] PASSED [ 69%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g6-idtype0] PASSED [ 69%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g6-idtype1] PASSED [ 69%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g7-idtype0] PASSED [ 69%]
tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g7-idtype1] PASSED [ 69%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g0-idtype0] PASSED [ 69%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g0-idtype1]
Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642'
PASSED [ 69%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g1-idtype0] PASSED [ 69%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g1-idtype1] PASSED [ 70%]
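In the test_gat_conv IDs above, the leading pair (1-1, 1-2, 4-1, 4-2) appears to sweep the number of attention heads and the output width, and the _bi variant again runs the layer on bipartite graph pairs. A hedged sketch of the module under test (illustrative fixtures, TensorFlow backend assumed):

import tensorflow as tf
import dgl
from dgl.nn.tensorflow import GATConv

g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))  # illustrative, not fixtures g0-g6
feat = tf.random.normal((g.num_nodes(), 5))

for num_heads in (1, 4):
    for out_dim in (1, 2):
        conv = GATConv(5, out_dim, num_heads)
        out = conv(g, feat)  # shape: (num_nodes, num_heads, out_dim)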
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g2-idtype0] PASSED [ 70%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g2-idtype1] PASSED [ 70%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g3-idtype0] PASSED [ 70%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g3-idtype1] PASSED [ 70%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g4-idtype0] PASSED [ 70%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g4-idtype1] PASSED [ 70%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g5-idtype0] PASSED [ 70%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g5-idtype1] PASSED [ 70%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g6-idtype0] PASSED [ 70%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g6-idtype1] PASSED [ 71%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g7-idtype0] PASSED [ 71%]
tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g7-idtype1] PASSED [ 71%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g0-idtype0] PASSED [ 71%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g0-idtype1] PASSED [ 71%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g1-idtype0] PASSED [ 71%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g1-idtype1] PASSED [ 71%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g2-idtype0] PASSED [ 71%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g2-idtype1] PASSED [ 71%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g3-idtype0] PASSED [ 72%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g3-idtype1] PASSED [ 72%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g4-idtype0] PASSED [ 72%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g4-idtype1] PASSED [ 72%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g5-idtype0] PASSED [ 72%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g5-idtype1] PASSED [ 72%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g6-idtype0] PASSED [ 72%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g6-idtype1] PASSED [ 72%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g7-idtype0] PASSED [ 72%]
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g7-idtype1] PASSED [ 73%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g0-idtype0]
Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7'
Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762'
Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c'
PASSED [ 73%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g0-idtype1] PASSED [ 73%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g1-idtype0] PASSED [ 73%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g1-idtype1] PASSED [ 73%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g2-idtype0] PASSED [ 73%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g2-idtype1] PASSED [ 73%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g3-idtype0] PASSED [ 73%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g3-idtype1] PASSED [ 73%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g4-idtype0] PASSED [ 74%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g4-idtype1] PASSED [ 74%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g5-idtype0] PASSED [ 74%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g5-idtype1] PASSED [ 74%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g6-idtype0] PASSED [ 74%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g6-idtype1] PASSED [ 74%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g7-idtype0] PASSED [ 74%]
tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g7-idtype1] PASSED [ 74%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g0-idtype0] PASSED [ 74%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g0-idtype1] PASSED [ 75%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g1-idtype0] PASSED [ 75%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g1-idtype1] PASSED [ 75%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g2-idtype0] PASSED [ 75%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g2-idtype1] PASSED [ 75%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g3-idtype0] PASSED [ 75%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g3-idtype1] PASSED [ 75%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g4-idtype0] PASSED [ 75%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g4-idtype1] PASSED [ 75%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g5-idtype0] PASSED [ 75%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g5-idtype1] PASSED [ 76%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g6-idtype0]
Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020'
[Pipeline] unstash
PASSED [ 76%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g6-idtype1] PASSED [ 76%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g7-idtype0] PASSED [ 76%]
tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g7-idtype1] PASSED [ 76%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g0-idtype0] PASSED [ 76%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g0-idtype1] PASSED [ 76%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g1-idtype0] PASSED [ 76%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g1-idtype1] PASSED [ 76%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g2-idtype0] PASSED [ 77%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g2-idtype1] PASSED [ 77%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g3-idtype0] PASSED [ 77%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g3-idtype1] PASSED [ 77%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g4-idtype0] PASSED [ 77%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g4-idtype1] PASSED [ 77%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g5-idtype0] PASSED [ 77%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g5-idtype1] PASSED [ 77%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g6-idtype0] PASSED [ 77%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g6-idtype1] PASSED [ 78%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g7-idtype0] PASSED [ 78%]
tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g7-idtype1] PASSED [ 78%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g0-idtype0] PASSED [ 78%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g0-idtype1] PASSED [ 78%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g1-idtype0] PASSED [ 78%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g1-idtype1] PASSED [ 78%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g2-idtype0] PASSED [ 78%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g2-idtype1] PASSED [ 78%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g0-idtype0] PASSED [ 79%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g0-idtype1] PASSED [ 79%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g1-idtype0] PASSED [ 79%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g1-idtype1] PASSED [ 79%]
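The test_sage_conv grid above sweeps the SAGEConv aggregator (mean/pool/gcn) and the output width (1/10), and the test_sage_conv_bi cases feed (source, destination) feature pairs on a bipartite graph. A sketch of both call forms; the graphs and feature sizes are illustrative assumptions, not the suite's fixtures:

import tensorflow as tf
import dgl
from dgl.nn.tensorflow import SAGEConv

g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
feat = tf.random.normal((g.num_nodes(), 5))
for agg in ("mean", "pool", "gcn"):            # the aggregator axis above
    out = SAGEConv(5, 2, agg)(g, feat)         # homogeneous case

# Bipartite form, as the *_bi cases appear to exercise it:
# tuple in_feats for (src, dst) widths, tuple features at call time.
bg = dgl.heterograph({("A", "to", "B"): ([0, 1, 2], [0, 0, 1])})
h_src = tf.random.normal((bg.num_src_nodes(), 5))
h_dst = tf.random.normal((bg.num_dst_nodes(), 7))
out = SAGEConv((5, 7), 2, "mean")(bg, (h_src, h_dst))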
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g2-idtype0] PASSED [ 79%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g2-idtype1]
[Pipeline] echo
Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-cpu-linux
[Pipeline] sh
PASSED [ 79%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g0-idtype0] PASSED [ 79%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g0-idtype1] PASSED [ 79%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g1-idtype0] PASSED [ 79%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g1-idtype1] PASSED [ 80%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g2-idtype0] PASSED [ 80%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g2-idtype1] PASSED [ 80%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g0-idtype0] PASSED [ 80%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g0-idtype1] PASSED [ 80%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g1-idtype0] PASSED [ 80%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g1-idtype1] PASSED [ 80%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g2-idtype0] PASSED [ 80%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g2-idtype1] PASSED [ 80%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g0-idtype0] PASSED [ 80%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g0-idtype1] PASSED [ 81%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g1-idtype0] PASSED [ 81%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g1-idtype1] PASSED [ 81%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g2-idtype0] PASSED [ 81%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g2-idtype1] PASSED [ 81%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g0-idtype0] PASSED [ 81%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g0-idtype1] PASSED [ 81%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g1-idtype0] PASSED [ 81%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g1-idtype1] PASSED [ 81%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g2-idtype0] PASSED [ 82%]
tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g2-idtype1] PASSED [ 82%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-mean-idtype0] PASSED [ 82%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-mean-idtype1]
+ bash tests/scripts/task_cpp_unit_test.sh /root/jenkins/workspace/dgl_PR-4648
~/jenkins/workspace/dgl_PR-4648/build ~/jenkins/workspace/dgl_PR-4648
total 14M
-rwxr-xr-x 1 root root 12M Sep 27 04:43 libdgl.so
-rwxr-xr-x 1 root root 2.4M Sep 27 04:44 runUnitTests
drwxr-xr-x 3 root root 4.0K Sep 27 05:23 tensoradapter
Running main() from /root/jenkins/workspace/dgl_PR-4648/third_party/googletest/googletest/src/gtest_main.cc
[==========] Running 101 tests from 23 test suites.
[----------] Global test environment set-up.
[----------] 1 test from GraphTest
[ RUN ] GraphTest.TestNumVertices
[ OK ] GraphTest.TestNumVertices (0 ms)
[----------] 1 test from GraphTest (0 ms total)
[----------] 3 tests from MessageQueueTest
[ RUN ] MessageQueueTest.AddRemove
[05:23:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/network/msg_queue.cc:27: Message is larger than the queue.
[ OK ] MessageQueueTest.AddRemove (0 ms)
[ RUN ] MessageQueueTest.EmptyAndNoMoreAdd
[ OK ] MessageQueueTest.EmptyAndNoMoreAdd (0 ms)
[ RUN ] MessageQueueTest.MultiThread
PASSED [ 82%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-pool-idtype0] PASSED [ 82%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-pool-idtype1] PASSED [ 82%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-gcn-idtype0] PASSED [ 82%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-gcn-idtype1] PASSED [ 82%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-mean-idtype0] PASSED [ 82%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-mean-idtype1] PASSED [ 83%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-pool-idtype0]
[ OK ] MessageQueueTest.MultiThread (11 ms)
[----------] 3 tests from MessageQueueTest (11 ms total)
[----------] 3 tests from SocketCommunicatorTest
[ RUN ] SocketCommunicatorTest.SendAndRecv
PASSED [ 83%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-pool-idtype1] PASSED [ 83%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-gcn-idtype0] PASSED [ 83%]
tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-gcn-idtype1] PASSED [ 83%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g0-idtype0] PASSED [ 83%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g0-idtype1] PASSED [ 83%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g1-idtype0] PASSED [ 83%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g1-idtype1] PASSED [ 83%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g2-idtype0] PASSED [ 84%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g2-idtype1] PASSED [ 84%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g3-idtype0] PASSED [ 84%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g3-idtype1] PASSED [ 84%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g4-idtype0] PASSED [ 84%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g4-idtype1] PASSED [ 84%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g5-idtype0] PASSED [ 84%]
tests/tensorflow/test_nn.py::test_sgc_conv[1-g5-idtype1] PASSED [ 84%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g0-idtype0] PASSED [ 84%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g0-idtype1] PASSED [ 85%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g1-idtype0] PASSED [ 85%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g1-idtype1] PASSED [ 85%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g2-idtype0] PASSED [ 85%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g2-idtype1] PASSED [ 85%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g3-idtype0] PASSED [ 85%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g3-idtype1] PASSED [ 85%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g4-idtype0] PASSED [ 85%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g4-idtype1] PASSED [ 85%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g5-idtype0] PASSED [ 85%]
tests/tensorflow/test_nn.py::test_sgc_conv[2-g5-idtype1] PASSED [ 86%]
tests/tensorflow/test_nn.py::test_appnp_conv[g0-idtype0] PASSED [ 86%]
tests/tensorflow/test_nn.py::test_appnp_conv[g0-idtype1] PASSED [ 86%]
tests/tensorflow/test_nn.py::test_appnp_conv[g1-idtype0] PASSED [ 86%]
tests/tensorflow/test_nn.py::test_appnp_conv[g1-idtype1] PASSED [ 86%]
tests/tensorflow/test_nn.py::test_appnp_conv[g2-idtype0] PASSED [ 86%]
tests/tensorflow/test_nn.py::test_appnp_conv[g2-idtype1] PASSED [ 86%]
tests/tensorflow/test_nn.py::test_appnp_conv[g3-idtype0] PASSED [ 86%]
tests/tensorflow/test_nn.py::test_appnp_conv[g3-idtype1] PASSED [ 86%]
tests/tensorflow/test_nn.py::test_appnp_conv[g4-idtype0] PASSED [ 87%]
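test_sgc_conv and test_appnp_conv above cover the two propagation-style layers: SGConv collapses k hops of symmetric-normalized propagation into a single linear layer, while APPNPConv runs k steps of personalized-PageRank-style propagation with no weights of its own. A sketch of both (the k and alpha values below are illustrative, not the suite's):

import tensorflow as tf
import dgl
from dgl.nn.tensorflow import SGConv, APPNPConv

g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
feat = tf.random.normal((g.num_nodes(), 5))

out1 = SGConv(5, 2, k=2)(g, feat)          # k-hop propagation + one dense layer
out2 = APPNPConv(k=3, alpha=0.1)(g, feat)  # parameter-free; keeps the input width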
tests/tensorflow/test_nn.py::test_appnp_conv[g4-idtype1] PASSED [ 87%]
tests/tensorflow/test_nn.py::test_appnp_conv[g5-idtype0] PASSED [ 87%]
tests/tensorflow/test_nn.py::test_appnp_conv[g5-idtype1] PASSED [ 87%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g0-idtype0] PASSED [ 87%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g0-idtype1] PASSED [ 87%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g1-idtype0] PASSED [ 87%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g1-idtype1] PASSED [ 87%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g2-idtype0] PASSED [ 87%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g2-idtype1] PASSED [ 88%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g3-idtype0] PASSED [ 88%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g3-idtype1] PASSED [ 88%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g4-idtype0] PASSED [ 88%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g4-idtype1] PASSED [ 88%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g5-idtype0] PASSED [ 88%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g5-idtype1] PASSED [ 88%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g6-idtype0] PASSED [ 88%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g6-idtype1] PASSED [ 88%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g7-idtype0] PASSED [ 89%]
tests/tensorflow/test_nn.py::test_gin_conv[mean-g7-idtype1] PASSED [ 89%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g0-idtype0] PASSED [ 89%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g0-idtype1]
Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95'
Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838'
Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack'
Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core'
Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang'
Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw'
Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tvm/3rdparty/dlpack'...
PASSED [ 89%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g1-idtype0] PASSED [ 89%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g1-idtype1] PASSED [ 89%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g2-idtype0] PASSED [ 89%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g2-idtype1] PASSED [ 89%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g3-idtype0] PASSED [ 89%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g3-idtype1] PASSED [ 90%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g4-idtype0] PASSED [ 90%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g4-idtype1] PASSED [ 90%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g5-idtype0] PASSED [ 90%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g5-idtype1] PASSED [ 90%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g6-idtype0] PASSED [ 90%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g6-idtype1] PASSED [ 90%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g7-idtype0] PASSED [ 90%]
tests/tensorflow/test_nn.py::test_gin_conv[max-g7-idtype1] PASSED [ 90%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g0-idtype0] PASSED [ 90%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g0-idtype1] PASSED [ 91%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g1-idtype0] PASSED [ 91%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g1-idtype1] PASSED [ 91%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g2-idtype0] PASSED [ 91%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g2-idtype1] PASSED [ 91%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g3-idtype0] PASSED [ 91%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g3-idtype1] PASSED [ 91%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g4-idtype0] PASSED [ 91%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g4-idtype1] PASSED [ 91%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g5-idtype0] PASSED [ 92%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g5-idtype1] PASSED [ 92%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g6-idtype0] PASSED [ 92%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g6-idtype1] PASSED [ 92%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g7-idtype0] PASSED [ 92%]
tests/tensorflow/test_nn.py::test_gin_conv[sum-g7-idtype1] PASSED [ 92%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g0-idtype0] PASSED [ 92%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g0-idtype1] PASSED [ 92%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g1-idtype0] PASSED [ 92%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g1-idtype1] PASSED [ 93%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g2-idtype0] PASSED [ 93%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g2-idtype1] PASSED [ 93%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g0-idtype0] PASSED [ 93%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g0-idtype1] PASSED [ 93%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g1-idtype0] PASSED [ 93%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g1-idtype1] PASSED [ 93%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g2-idtype0] PASSED [ 93%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g2-idtype1]
Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tvm/3rdparty/dmlc-core'...
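The test_gin_conv grid above sweeps GINConv's aggregator over mean/max/sum; the layer wraps a user-supplied apply function around the aggregated neighborhood. A sketch (the Dense apply function below is an assumption, not the callable the suite's fixtures use):

import tensorflow as tf
import dgl
from dgl.nn.tensorflow import GINConv

g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
feat = tf.random.normal((g.num_nodes(), 5))

for agg in ("mean", "max", "sum"):                  # the aggregator axis above
    conv = GINConv(tf.keras.layers.Dense(2), agg)   # (apply_func, aggregator_type)
    out = conv(g, feat)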
PASSED [ 93%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g0-idtype0] PASSED [ 94%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g0-idtype1] PASSED [ 94%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g1-idtype0] PASSED [ 94%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g1-idtype1] PASSED [ 94%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g2-idtype0] PASSED [ 94%]
tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g2-idtype1] PASSED [ 94%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g0-idtype0] PASSED [ 94%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g0-idtype1] PASSED [ 94%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g1-idtype0] PASSED [ 94%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g1-idtype1] PASSED [ 95%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g2-idtype0] PASSED [ 95%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g2-idtype1] PASSED [ 95%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g3-idtype0] PASSED [ 95%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g3-idtype1] PASSED [ 95%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g4-idtype0] PASSED [ 95%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g4-idtype1] PASSED [ 95%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g5-idtype0] PASSED [ 95%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g5-idtype1] PASSED [ 95%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g6-idtype0] PASSED [ 95%]
tests/tensorflow/test_nn.py::test_edge_conv[1-g6-idtype1] PASSED [ 96%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g0-idtype0] PASSED [ 96%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g0-idtype1] PASSED [ 96%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g1-idtype0] PASSED [ 96%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g1-idtype1] PASSED [ 96%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g2-idtype0] PASSED [ 96%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g2-idtype1] PASSED [ 96%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g3-idtype0] PASSED [ 96%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g3-idtype1] PASSED [ 96%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g4-idtype0] PASSED [ 97%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g4-idtype1] PASSED [ 97%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g5-idtype0] PASSED [ 97%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g5-idtype1] PASSED [ 97%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g6-idtype0] PASSED [ 97%]
tests/tensorflow/test_nn.py::test_edge_conv[2-g6-idtype1] PASSED [ 97%]
tests/tensorflow/test_nn.py::test_edge_conv_bi[1-g0-idtype0] PASSED [ 97%]
tests/tensorflow/test_nn.py::test_edge_conv_bi[1-g0-idtype1] PASSED [ 97%]
tests/tensorflow/test_nn.py::test_edge_conv_bi[1-g1-idtype0] PASSED [ 97%]
tests/tensorflow/test_nn.py::test_edge_conv_bi[1-g1-idtype1] PASSED [ 98%]
tests/tensorflow/test_nn.py::test_edge_conv_bi[2-g0-idtype0] PASSED [ 98%]
tests/tensorflow/test_nn.py::test_edge_conv_bi[2-g0-idtype1] PASSED [ 98%]
tests/tensorflow/test_nn.py::test_edge_conv_bi[2-g1-idtype0] PASSED [ 98%]
tests/tensorflow/test_nn.py::test_edge_conv_bi[2-g1-idtype1] PASSED [ 98%]
tests/tensorflow/test_nn.py::test_hetero_conv[sum-idtype0] PASSED [ 98%]
tests/tensorflow/test_nn.py::test_hetero_conv[sum-idtype1] PASSED [ 98%]
tests/tensorflow/test_nn.py::test_hetero_conv[max-idtype0] PASSED [ 98%]
tests/tensorflow/test_nn.py::test_hetero_conv[max-idtype1] PASSED [ 98%]
tests/tensorflow/test_nn.py::test_hetero_conv[min-idtype0] PASSED [ 99%]
tests/tensorflow/test_nn.py::test_hetero_conv[min-idtype1] PASSED [ 99%]
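The test_hetero_conv cases here and just below sweep HeteroGraphConv's cross-relation reducer over sum/max/min/mean/stack plus a user-supplied callable (the 'myagg' ID). A hedged sketch: the two-relation graph and the reducer below are illustrative assumptions, with plain GraphConvs as the per-relation modules:

import tensorflow as tf
import dgl
from dgl.nn.tensorflow import GraphConv, HeteroGraphConv

g = dgl.heterograph({
    ("user", "follows", "user"): ([0, 1], [1, 2]),
    ("user", "plays", "game"): ([0, 1, 2], [0, 0, 1]),
})
feats = {"user": tf.random.normal((3, 5)), "game": tf.random.normal((2, 5))}

def myagg(tensors, dsttype):  # custom reducer across relations
    return tf.add_n(tensors)

for agg in ("sum", "max", "min", "mean", "stack", myagg):
    conv = HeteroGraphConv(
        {"follows": GraphConv(5, 2, allow_zero_in_degree=True),
         "plays": GraphConv(5, 2, allow_zero_in_degree=True)},
        aggregate=agg)
    out = conv(g, feats)  # dict keyed by destination node type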
tests/tensorflow/test_nn.py::test_hetero_conv[mean-idtype0] PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[mean-idtype1] Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tvm/3rdparty/rang'... PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[stack-idtype0] PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[stack-idtype1] PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[myagg-idtype0] PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[myagg-idtype1] PASSED [ 99%] tests/tensorflow/test_nn.py::test_dense_cheb_conv[1] PASSED [ 99%] tests/tensorflow/test_nn.py::test_dense_cheb_conv[2] PASSED [100%] =============================== warnings summary =============================== ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/iterator_ops.py:546 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/iterator_ops.py:546: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working class IteratorBase(collections.Iterator, trackable.Trackable, ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py:106 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py:106: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working class DatasetV2(collections.Iterable, tracking_base.Trackable, ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/autograph/utils/testing.py:21 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/autograph/utils/testing.py:21: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses import imp ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _nlv = LooseVersion(_np_version) ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p16 = _nlv < LooseVersion("1.16") ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p17 = _nlv < LooseVersion("1.17") ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. 
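The two recurring deprecations in this warnings summary have mechanical fixes; a minimal sketch of the assumed replacements (not TensorFlow's or pandas' actual patches):

    # The ABC aliases in 'collections' are deprecated; import from 'collections.abc':
    from collections.abc import Iterable, Iterator

    # distutils' LooseVersion is deprecated in favour of packaging.version:
    from packaging.version import Version
    import numpy as np

    # Equivalent of the pandas version check above, without the deprecated class:
    _np_version_under1p16 = Version(np.__version__) < Version("1.16")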
_np_version_under1p18 = _nlv < LooseVersion("1.18") ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p19 = _nlv < LooseVersion("1.19") ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p20 = _nlv < LooseVersion("1.20") ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. other = LooseVersion(other) ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125 ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(_np_version) >= LooseVersion("1.17.0"): ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:23 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:23: DeprecationWarning: NEAREST is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.NEAREST or Dither.NONE instead. 'nearest': pil_image.NEAREST, ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:24 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:24: DeprecationWarning: BILINEAR is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BILINEAR instead. 'bilinear': pil_image.BILINEAR, ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:25 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:25: DeprecationWarning: BICUBIC is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BICUBIC instead. 'bicubic': pil_image.BICUBIC, ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:28 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:28: DeprecationWarning: HAMMING is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.HAMMING instead. if hasattr(pil_image, 'HAMMING'): ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:29 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:29: DeprecationWarning: HAMMING is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.HAMMING instead. 
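The Pillow warnings refer to module-level filter constants that moved onto an enum in Pillow 9.1; a hedged compatibility sketch (keras_preprocessing's actual fix may differ):

    from PIL import Image

    # Pillow >= 9.1 exposes resampling filters as Image.Resampling.*; the bare
    # constants referenced in the warnings above are the deprecated spelling.
    try:
        NEAREST = Image.Resampling.NEAREST
        LANCZOS = Image.Resampling.LANCZOS
    except AttributeError:  # Pillow < 9.1 has no Resampling enum
        NEAREST = Image.NEAREST
        LANCZOS = Image.LANCZOS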
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:30 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:30: DeprecationWarning: BOX is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BOX instead. if hasattr(pil_image, 'BOX'): ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:31 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:31: DeprecationWarning: BOX is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BOX instead. _PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:33 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:33: DeprecationWarning: LANCZOS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead. if hasattr(pil_image, 'LANCZOS'): ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:34 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:34: DeprecationWarning: LANCZOS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead. _PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS python/dgl/backend/tensorflow/tensor.py:15 python/dgl/backend/tensorflow/tensor.py:15 /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/tensorflow/tensor.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(tf.__version__) < LooseVersion("2.3.0"): tests/tensorflow/test_nn.py::test_graph_conv[1] tests/tensorflow/test_nn.py::test_graph_conv[2] tests/tensorflow/test_nn.py::test_simple_pool tests/tensorflow/test_nn.py::test_glob_att_pool tests/tensorflow/test_nn.py::test_rgcn[1] tests/tensorflow/test_nn.py::test_rgcn[2] tests/tensorflow/test_nn.py::test_rgcn[8] tests/tensorflow/test_nn.py::test_dense_cheb_conv[1] tests/tensorflow/test_nn.py::test_dense_cheb_conv[2] /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`. dgl_warning('Recommend creating graphs by `dgl.graph(data)`' tests/tensorflow/test_nn.py::test_rgcn[1] tests/tensorflow/test_nn.py::test_rgcn[2] tests/tensorflow/test_nn.py::test_rgcn[8] /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly'] are deprecated in v0.5, and can be safely removed in all cases. 
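The DGLWarning entries spell out their own fixes; a minimal sketch with illustrative data (not taken from the tests):

    import dgl
    import torch

    # Preferred constructor per the warning: dgl.graph(data) rather than the
    # deprecated dgl.DGLGraph(data). The 'readonly' keyword flagged above is
    # likewise deprecated and can simply be dropped.
    src = torch.tensor([0, 1, 2])
    dst = torch.tensor([1, 2, 0])
    g = dgl.graph((src, dst))  # 3-node, 3-edge homogeneous graph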
' removed in all cases.'.format(list(deprecate_kwargs.keys()))) -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html -- generated xml file: /root/jenkins/workspace/dgl_PR-4648/pytest_backend.xml -- ============================ slowest 100 durations ============================= 0.32s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g0-idtype0] 0.26s call tests/tensorflow/test_nn.py::test_graph_conv[1] 0.12s call tests/tensorflow/test_nn.py::test_rgcn[8] 0.11s call tests/tensorflow/test_nn.py::test_rgcn[2] 0.06s call tests/tensorflow/test_nn.py::test_rgcn[1] 0.06s call tests/tensorflow/test_nn.py::test_hetero_conv[sum-idtype0] 0.05s call tests/tensorflow/test_nn.py::test_hetero_conv[sum-idtype1] 0.05s call tests/tensorflow/test_nn.py::test_hetero_conv[mean-idtype1] 0.05s call tests/tensorflow/test_nn.py::test_hetero_conv[min-idtype1] 0.05s call tests/tensorflow/test_nn.py::test_hetero_conv[max-idtype1] 0.05s call tests/tensorflow/test_nn.py::test_hetero_conv[min-idtype0] 0.05s call tests/tensorflow/test_nn.py::test_hetero_conv[myagg-idtype1] 0.05s call tests/tensorflow/test_nn.py::test_hetero_conv[stack-idtype1] 0.05s call tests/tensorflow/test_nn.py::test_hetero_conv[mean-idtype0] 0.05s call tests/tensorflow/test_nn.py::test_hetero_conv[max-idtype0] 0.05s call tests/tensorflow/test_nn.py::test_hetero_conv[myagg-idtype0] 0.05s call tests/tensorflow/test_nn.py::test_hetero_conv[stack-idtype0] 0.04s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-mean-idtype0] 0.03s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-gcn-idtype0] 0.03s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-mean-idtype0] 0.03s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-gcn-idtype0] 0.03s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-pool-idtype0] 0.03s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-pool-idtype0] 0.03s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-gcn-idtype1] 0.03s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-mean-idtype1] 0.03s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-pool-idtype1] 0.03s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-pool-idtype1] 0.03s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-mean-idtype1] 0.03s call tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-gcn-idtype1] 0.03s call tests/tensorflow/test_nn.py::test_simple_pool 0.03s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g0-idtype0] 0.03s call tests/tensorflow/test_nn.py::test_graph_conv[2] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g4-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g4-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g4-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g4-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g0-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g2-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g3-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g4-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g0-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g6-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g6-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g5-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g2-idtype0] 0.02s call 
tests/tensorflow/test_nn.py::test_gat_conv[4-2-g0-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g2-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g4-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g4-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g4-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g3-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g5-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g1-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g1-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g6-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g5-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g6-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g3-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g3-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g2-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g5-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g1-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g1-idtype0] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g6-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g0-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g6-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g6-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g3-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g6-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g2-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g0-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g0-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g5-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g3-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g2-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g5-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g2-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g3-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g5-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g0-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g5-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g3-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g2-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g1-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g1-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g1-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g1-idtype1] 0.02s call tests/tensorflow/test_nn.py::test_glob_att_pool 0.01s call tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g1-idtype0] 0.01s call tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g1-idtype0] 0.01s call tests/tensorflow/test_nn.py::test_gat_conv_bi[4-2-g1-idtype0] 0.01s call tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g1-idtype0] 0.01s call tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g0-idtype0] 0.01s call tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g0-idtype0] 0.01s call tests/tensorflow/test_nn.py::test_gat_conv_bi[4-2-g0-idtype0] 0.01s call 
tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g0-idtype0] 0.01s call tests/tensorflow/test_nn.py::test_appnp_conv[g0-idtype0] 0.01s call tests/tensorflow/test_nn.py::test_sgc_conv[1-g0-idtype0] 0.01s call tests/tensorflow/test_nn.py::test_sgc_conv[1-g5-idtype0] 0.01s call tests/tensorflow/test_nn.py::test_sgc_conv[2-g5-idtype0] ====================== 920 passed, 35 warnings in 13.00s ======================= Cloning into '/root/jenkins/workspace/dgl_PR-4648@2/third_party/tvm/3rdparty/vta-hw'... [Pipeline] } [Pipeline] // timeout [Pipeline] } [Pipeline] // stage Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration... Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' [WS-CLEANUP] done [Pipeline] } $ docker stop --time=1 accf251fd0b56b1d65207f2fd238e906ccb6fddb8b2532d85c1dfc3329fb23a3 Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' $ docker rm -f accf251fd0b56b1d65207f2fd238e906ccb6fddb8b2532d85c1dfc3329fb23a3 [Pipeline] // withDockerContainer [Pipeline] } [Pipeline] unstash [Pipeline] // withEnv [Pipeline] } Running on dglci-manual-gpu-worker in /root/jenkins/workspace/dgl_PR-4648 [Pipeline] // node [Pipeline] { [Pipeline] } [Pipeline] // stage [Pipeline] } [Pipeline] checkout The recommended git tool is: git Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/nccl'... using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags [Pipeline] echo Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-cpu-linux [Pipeline] timeout Timeout set to expire in 30 min [Pipeline] { [Pipeline] sh Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/phmap'... + bash tests/scripts/task_unit_test.sh tensorflow cpu Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe'... 
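The "slowest 100 durations" table, the "920 passed" summary, and the generated pytest_backend.xml all come from standard pytest options; a sketch of an equivalent invocation (the exact flags live in tests/scripts/task_unit_test.sh, which is not shown in this log):

    import pytest

    # --durations=100 prints the "slowest 100 durations" table;
    # --junitxml writes the XML report that Jenkins picks up.
    raise SystemExit(pytest.main([
        "tests/tensorflow/test_nn.py",
        "-v",
        "--durations=100",
        "--junitxml=pytest_backend.xml",
    ]))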
Cloning repository https://github.com/dmlc/dgl.git > git init /root/jenkins/workspace/dgl_PR-4648 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 Requirement already satisfied: pytest in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (7.1.2) Collecting psutil Downloading psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (281 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 281.3/281.3 kB 11.7 MB/s eta 0:00:00 Collecting pyyaml Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 596.3/596.3 kB 27.7 MB/s eta 0:00:00 Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/thrust'... Cleaning workspace Fetching without tags Collecting pydantic Downloading pydantic-1.10.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (11.8 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 11.8/11.8 MB 115.9 MB/s eta 0:00:00 Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. 
Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 Collecting pandas Using cached pandas-1.1.5-cp37-cp37m-manylinux1_x86_64.whl (9.5 MB) Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Collecting rdflib Downloading rdflib-6.2.0-py3-none-any.whl (500 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 500.3/500.3 kB 52.2 MB/s eta 0:00:00 Collecting ogb Downloading ogb-1.3.4-py3-none-any.whl (78 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 78.6/78.6 kB 13.5 MB/s eta 0:00:00 Requirement already satisfied: iniconfig in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (1.1.1) Requirement already satisfied: py>=1.8.2 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (1.11.0) Requirement already satisfied: pluggy<2.0,>=0.12 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (1.0.0) Requirement already satisfied: attrs>=19.2.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (22.1.0) Requirement already satisfied: packaging in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (21.3) Requirement already satisfied: importlib-metadata>=0.12 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (4.12.0) Requirement already satisfied: tomli>=1.0.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pytest) (2.0.1) Requirement already satisfied: typing-extensions>=4.1.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pydantic) (4.3.0) Commit message: "fix for pytorch < 1.12" Cleaning workspace [ OK ] SocketCommunicatorTest.SendAndRecv (7005 ms) [ RUN ] SocketCommunicatorTest.SendAndRecvTimeout Collecting pytz>=2017.2 Using cached pytz-2022.2.1-py2.py3-none-any.whl (500 kB) Requirement already satisfied: numpy>=1.15.4 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pandas) (1.18.5) Requirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from pandas) (2.8.2) Collecting isodate Downloading isodate-0.6.1-py2.py3-none-any.whl (41 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 41.7/41.7 kB 6.8 MB/s eta 0:00:00 Requirement already satisfied: pyparsing in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from rdflib) (3.0.9) Requirement already satisfied: setuptools in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from rdflib) (61.2.0) Collecting torch>=1.6.0 Downloading torch-1.12.1-cp37-cp37m-manylinux1_x86_64.whl (776.3 MB) [Pipeline] withEnv [Pipeline] { [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm'... 
> git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 + docker pull dgllib/dgl-ci-gpu:cu101_v220816 [ OK ] SocketCommunicatorTest.SendAndRecvTimeout (2001 ms) [ RUN ] SocketCommunicatorTest.TCPSocketBind [ OK ] SocketCommunicatorTest.TCPSocketBind (1 ms) [----------] 3 tests from SocketCommunicatorTest (9007 ms total) [----------] 3 tests from SplitStringTest [ RUN ] SplitStringTest.SplitStringUsingCompoundDelim [ OK ] SplitStringTest.SplitStringUsingCompoundDelim (0 ms) [ RUN ] SplitStringTest.testSplitStringUsingSingleDelim [ OK ] SplitStringTest.testSplitStringUsingSingleDelim (0 ms) [ RUN ] SplitStringTest.testSplitingNoDelimString [ OK ] SplitStringTest.testSplitingNoDelimString (0 ms) [----------] 3 tests from SplitStringTest (0 ms total) [----------] 1 test from StringPrintf [ RUN ] StringPrintf.normal [ OK ] StringPrintf.normal (0 ms) [----------] 1 test from StringPrintf (0 ms total) [----------] 13 tests from ArrayTest [ RUN ] ArrayTest.TestCreate [ OK ] ArrayTest.TestCreate (0 ms) [ RUN ] ArrayTest.TestRange [ OK ] ArrayTest.TestRange (0 ms) [ RUN ] ArrayTest.TestFull [ OK ] ArrayTest.TestFull (0 ms) [ RUN ] ArrayTest.TestClone [ OK ] ArrayTest.TestClone (0 ms) [ RUN ] ArrayTest.TestAsNumBits [ OK ] ArrayTest.TestAsNumBits (0 ms) [ RUN ] ArrayTest.Arith [ OK ] ArrayTest.Arith (0 ms) [ RUN ] ArrayTest.HStack [ OK ] ArrayTest.HStack (0 ms) [ RUN ] ArrayTest.TestIndexSelect [ OK ] ArrayTest.TestIndexSelect (0 ms) [ RUN ] ArrayTest.TestRelabel_ [ OK ] ArrayTest.TestRelabel_ (0 ms) [ RUN ] ArrayTest.CumSum [ OK ] ArrayTest.CumSum (1 ms) [ RUN ] ArrayTest.Scatter_ [ OK ] ArrayTest.Scatter_ (0 ms) [ RUN ] ArrayTest.NonZero [ OK ] ArrayTest.NonZero (0 ms) [ RUN ] ArrayTest.Sort [ OK ] ArrayTest.Sort (0 ms) [----------] 13 tests from ArrayTest (1 ms total) [----------] 2 tests from MatrixTest [ RUN ] MatrixTest.TestToSimpleCsr [ OK ] MatrixTest.TestToSimpleCsr (3 ms) [ RUN ] MatrixTest.TestToSimpleCoo [ OK ] MatrixTest.TestToSimpleCoo (0 ms) [----------] 2 tests from MatrixTest (3 ms total) [----------] 2 tests from DisjointUnionTest [ RUN ] DisjointUnionTest.TestDisjointUnionPartitionCoo [ OK ] DisjointUnionTest.TestDisjointUnionPartitionCoo (0 ms) [ RUN ] DisjointUnionTest.TestDisjointUnionPartitionCsr [ OK ] DisjointUnionTest.TestDisjointUnionPartitionCsr (1 ms) [----------] 2 tests from DisjointUnionTest (1 ms total) [----------] 2 tests from SliceContiguousChunk [ RUN ] SliceContiguousChunk.TestSliceContiguousChunkCoo [ OK ] SliceContiguousChunk.TestSliceContiguousChunkCoo (0 ms) [ RUN ] SliceContiguousChunk.TestSliceContiguousChunkCsr [ OK ] SliceContiguousChunk.TestSliceContiguousChunkCsr (0 ms) [----------] 2 tests from SliceContiguousChunk (0 ms total) [----------] 2 tests from MatrixUnionTest [ RUN ] MatrixUnionTest.TestMatrixUnionCsr [ OK ] MatrixUnionTest.TestMatrixUnionCsr (0 ms) [ RUN ] MatrixUnionTest.TestMatrixUnionCoo [ OK ] MatrixUnionTest.TestMatrixUnionCoo (0 ms) [----------] 2 tests from MatrixUnionTest (0 
ms total) [----------] 1 test from LineGraphTest [ RUN ] LineGraphTest.LineGraphCOO [ OK ] LineGraphTest.LineGraphCOO (0 ms) [----------] 1 test from LineGraphTest (0 ms total) [----------] 3 tests from CsrmmTest [ RUN ] CsrmmTest.TestCsrmm [ OK ] CsrmmTest.TestCsrmm (3 ms) [ RUN ] CsrmmTest.TestCsrsum [ OK ] CsrmmTest.TestCsrsum (0 ms) [ RUN ] CsrmmTest.TestCsrmask [ OK ] CsrmmTest.TestCsrmask (4 ms) [----------] 3 tests from CsrmmTest (7 ms total) [----------] 2 tests from PartitionTest [ RUN ] PartitionTest.TestRemainderPartition [ OK ] PartitionTest.TestRemainderPartition (0 ms) [ RUN ] PartitionTest.TestRangePartition [ OK ] PartitionTest.TestRangePartition (0 ms) [----------] 2 tests from PartitionTest (0 ms total) [----------] 11 tests from RowwiseTest [ RUN ] RowwiseTest.TestCSRSampling cu101_v220816: Pulling from dgllib/dgl-ci-gpu Digest: sha256:ca40fc52876a2563a4e904d0c271d658c1acc8e6a4f8611b578bb49f8c7fd925 Status: Image is up to date for dgllib/dgl-ci-gpu:cu101_v220816 docker.io/dgllib/dgl-ci-gpu:cu101_v220816 [Pipeline] } [Pipeline] // withEnv [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh + docker inspect -f . dgllib/dgl-ci-gpu:cu101_v220816 . [Pipeline] } [Pipeline] // withEnv [Pipeline] withDockerContainer dglci-manual-gpu-worker does not seem to be running inside a container $ docker run -t -d -u 0:0 --runtime nvidia --shm-size=8gb -w /root/jenkins/workspace/dgl_PR-4648 -v /root/jenkins/workspace/dgl_PR-4648:/root/jenkins/workspace/dgl_PR-4648:rw,z -v /root/jenkins/workspace/dgl_PR-4648@tmp:/root/jenkins/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-gpu:cu101_v220816 cat $ docker top f9b6859df1d65887fd5c1562bda6c9300bf7673dd8efba82320500053c49173b -eo pid,comm [Pipeline] { [Pipeline] stage [Pipeline] { (Torch GPU Unit test) [Pipeline] sh + nvidia-smi Tue Sep 27 05:24:00 2022 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 510.47.03 Driver Version: 510.47.03 CUDA Version: 11.6 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. 
| |===============================+======================+======================| | 0 Tesla T4 On | 00000000:00:1E.0 Off | 0 | | N/A 45C P8 16W / 70W | 0MiB / 15360MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=============================================================================| | No running processes found | +-----------------------------------------------------------------------------+ [Pipeline] sh + rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@65304b80; decorates RemoteLauncher[hudson.remoting.Channel@41a64267:dglci-manual-gpu-worker] will be ignored (a typical symptom is the Git executable not being run inside a designated container) Fetching changes from the remote Git repository Cleaning workspace Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace > git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648/.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 [Pipeline] sh + git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 
'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/xbyak'... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 776.3/776.3 MB 5.6 MB/s eta 0:00:00 Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/METIS/GKlib'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/googletest'... Requirement already satisfied: six>=1.12.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from ogb) (1.16.0) Requirement already satisfied: tqdm>=4.29.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from ogb) (4.64.0) Collecting scikit-learn>=0.20.0 Using cached scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (24.8 MB) Requirement already satisfied: urllib3>=1.24.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from ogb) (1.26.11) Collecting outdated>=0.2.0 Using cached outdated-0.2.1-py3-none-any.whl (7.5 kB) Requirement already satisfied: zipp>=0.5 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from importlib-metadata>=0.12->pytest) (3.8.1) Requirement already satisfied: requests in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from outdated>=0.2.0->ogb) (2.28.1) Collecting littleutils Using cached littleutils-0.2.2-py3-none-any.whl Collecting threadpoolctl>=2.0.0 Using cached threadpoolctl-3.1.0-py3-none-any.whl (14 kB) Requirement already satisfied: scipy>=1.1.0 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.4.1) Requirement already satisfied: joblib>=0.11 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.1.0) Requirement already satisfied: charset-normalizer<3,>=2 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2.1.0) Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2022.6.15) Requirement already satisfied: idna<4,>=2.5 in /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages (from 
requests->outdated>=0.2.0->ogb) (3.3) Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Installing collected packages: pytz, littleutils, torch, threadpoolctl, pyyaml, pydantic, psutil, isodate, scikit-learn, rdflib, pandas, outdated, ogb Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/libxsmm'... Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/googletest'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/libnop'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/libuv'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/pybind11'... Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/pybind11/tools/clang'... Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/thrust/dependencies/cub'... 
Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/dmlc-core'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/rang'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/vta-hw'... Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] unstash [Pipeline] echo Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-cpu-linux [Pipeline] timeout Timeout set to expire in 30 min [Pipeline] { Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nccl'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/phmap'... [Pipeline] sh + bash tests/scripts/task_unit_test.sh pytorch cpu Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'... Requirement already satisfied: pytest in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (7.1.2) Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust'... 
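All of the interleaved "Cloning into ..." and "Submodule path ... checked out" lines above are the output of the single `git submodule update --recursive --init` step; a minimal Python driver sketch of that step:

    import subprocess

    # --init registers every submodule listed in .gitmodules; --recursive
    # follows nested ones (e.g. tensorpipe -> pybind11 -> tools/clang above).
    subprocess.run(
        ["git", "submodule", "update", "--recursive", "--init"],
        check=True,
    )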
Collecting psutil Downloading psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (281 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 281.3/281.3 kB 12.7 MB/s eta 0:00:00 Collecting pyyaml Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 596.3/596.3 kB 45.6 MB/s eta 0:00:00 Collecting pydantic Downloading pydantic-1.10.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (11.8 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 11.8/11.8 MB 143.8 MB/s eta 0:00:00 Requirement already satisfied: pandas in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (1.1.5) Collecting rdflib Downloading rdflib-6.2.0-py3-none-any.whl (500 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 500.3/500.3 kB 71.4 MB/s eta 0:00:00 Requirement already satisfied: ogb in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (1.3.3) Requirement already satisfied: attrs>=19.2.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (22.1.0) Requirement already satisfied: pluggy<2.0,>=0.12 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (1.0.0) Requirement already satisfied: packaging in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (21.3) Requirement already satisfied: py>=1.8.2 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (1.11.0) Requirement already satisfied: tomli>=1.0.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (2.0.1) Requirement already satisfied: importlib-metadata>=0.12 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (4.12.0) Requirement already satisfied: iniconfig in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (1.1.1) Requirement already satisfied: typing-extensions>=4.1.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pydantic) (4.3.0) Requirement already satisfied: numpy>=1.15.4 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pandas) (1.21.6) Requirement already satisfied: pytz>=2017.2 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pandas) (2022.2.1) Requirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pandas) (2.8.2) Requirement already satisfied: setuptools in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from rdflib) (61.2.0) Requirement already satisfied: pyparsing in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from rdflib) (3.0.9) Collecting isodate Downloading isodate-0.6.1-py2.py3-none-any.whl (41 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 41.7/41.7 kB 10.0 MB/s eta 0:00:00 Requirement already satisfied: scikit-learn>=0.20.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.0.2) Requirement already satisfied: torch>=1.6.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.9.0+cpu) Requirement already satisfied: six>=1.12.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.16.0) Requirement already satisfied: outdated>=0.2.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (0.2.1) Requirement already satisfied: urllib3>=1.24.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.26.11) Requirement already satisfied: tqdm>=4.29.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (4.64.0) Requirement 
already satisfied: zipp>=0.5 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from importlib-metadata>=0.12->pytest) (3.8.1) Requirement already satisfied: littleutils in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from outdated>=0.2.0->ogb) (0.2.2) Requirement already satisfied: requests in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from outdated>=0.2.0->ogb) (2.28.1) Requirement already satisfied: threadpoolctl>=2.0.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (3.1.0) Requirement already satisfied: scipy>=1.1.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.7.3) Requirement already satisfied: joblib>=0.11 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.1.0) Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2022.6.15) Requirement already satisfied: idna<4,>=2.5 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (3.3) Requirement already satisfied: charset-normalizer<3,>=2 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2.1.0) Installing collected packages: pyyaml, pydantic, psutil, isodate, rdflib Successfully installed isodate-0.6.1 psutil-5.9.2 pydantic-1.10.2 pyyaml-6.0 rdflib-6.2.0 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv [ OK ] RowwiseTest.TestCSRSampling (24230 ms) [ RUN ] RowwiseTest.TestCSRSamplingUniform ============================= test session starts ============================== platform linux -- Python 3.7.0, pytest-7.1.2, pluggy-1.0.0 -- /opt/conda/envs/pytorch-ci/bin/python3 cachedir: .pytest_cache rootdir: /root/jenkins/workspace/dgl_PR-4648@3 collecting ... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm'... [ OK ] RowwiseTest.TestCSRSamplingUniform (4560 ms) [ RUN ] RowwiseTest.TestCSRPerEtypeSampling [ OK ] RowwiseTest.TestCSRPerEtypeSampling (20 ms) [ RUN ] RowwiseTest.TestCSRPerEtypeSamplingUniform [ OK ] RowwiseTest.TestCSRPerEtypeSamplingUniform (8 ms) [ RUN ] RowwiseTest.TestCOOSampling Successfully installed isodate-0.6.1 littleutils-0.2.2 ogb-1.3.4 outdated-0.2.1 pandas-1.1.5 psutil-5.9.2 pydantic-1.10.2 pytz-2022.2.1 pyyaml-6.0 rdflib-6.2.0 scikit-learn-1.0.2 threadpoolctl-3.1.0 torch-1.12.1 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ============================= test session starts ============================== platform linux -- Python 3.7.0, pytest-7.1.2, pluggy-1.0.0 -- /opt/conda/envs/tensorflow-ci/bin/python3 cachedir: .pytest_cache rootdir: /root/jenkins/workspace/dgl_PR-4648@2 collecting ... 
collected 2458 items tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[idtype0] FAILED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[idtype1] FAILED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[idtype0] PASSED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[idtype1] PASSED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype0] FAILED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype1] FAILED [ 0%] tests/compute/test_backend.py::test_set_default_backend PASSED [ 0%] tests/compute/test_basics.py::test_compatible PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_getter[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_getter[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_update_routines[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_update_routines[idtype1] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/xbyak'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'... Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' PASSED [ 0%] tests/compute/test_basics.py::test_update_all_0deg[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_update_all_0deg[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[idtype0] Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' collected 2458 items tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[idtype0] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[idtype1] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[idtype0] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[idtype1] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype0] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype1] SKIPPED [ 0%] tests/compute/test_backend.py::test_set_default_backend PASSED [ 0%] tests/compute/test_basics.py::test_compatible PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_getter[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_getter[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[idtype1] PASSED [ 0%] 
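Regarding pip's warning above about running as root: a minimal sketch of the virtual-environment route it recommends, using a hypothetical path:

    import venv

    # Build an isolated environment with its own pip, then install into it
    # instead of into the root interpreter, e.g.:
    #   /tmp/ci-env/bin/pip install pytest psutil pyyaml pydantic rdflib ogb
    venv.create("/tmp/ci-env", with_pip=True)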
tests/compute/test_basics.py::test_update_routines[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_dynamic_addition PASSED [ 0%] tests/compute/test_basics.py::test_repr[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_repr[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_local_var[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_local_var[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_issue_1088[idtype0] Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'... 
FAILED [ 1%] tests/compute/test_basics.py::test_issue_1088[idtype1] FAILED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_issue_2484[idtype0] FAILED [ 1%] tests/compute/test_basics.py::test_issue_2484[idtype1] FAILED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch1[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch1[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_send_and_recv[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_update_routines[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_send_and_recv[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_propagate[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_propagate[idtype1] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'... PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype0-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype0-gs1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype1-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype1-gs1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_unbatch2[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_unbatch2[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_update_all_0deg[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[idtype1] PASSED [ 0%] 
tests/compute/test_basics.py::test_update_all_0deg[idtype1] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'... PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[idtype0] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[idtype1] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_netypes PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_dynamic_addition PASSED [ 0%] tests/compute/test_basics.py::test_repr[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_repr[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_local_var[idtype0] Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'... PASSED [ 1%] tests/compute/test_basics.py::test_local_var[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype0-idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[idtype0] Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'... 
PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype0-idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype1-idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_issue_1088[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_issue_1088[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype1-idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype0-idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_issue_2484[idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype1-idtype1] Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'... PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_issue_2484[idtype1] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'... 
PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch1[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch1[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[idtype0] SKIPPED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[idtype1] SKIPPED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_send_and_recv[idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype1-idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_send_and_recv[idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype1-idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_propagate[idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype1-idtype0] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'... 
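The test_csrmm, test_csrsum, and test_csrmask sweeps above exercise DGL's CSR sparse kernels (sparse-matrix product, entry-wise sum, and masked value lookup) plus their backward passes across dtype/idtype combinations. The operations being validated, shown here with a SciPy analogy rather than DGL's own kernels:

```python
import scipy.sparse as sp

A = sp.random(4, 4, density=0.5, format="csr", random_state=0)
B = sp.random(4, 4, density=0.5, format="csr", random_state=1)
C = A @ B                    # csrmm: CSR x CSR matrix product (SpGEMM)
S = A + B                    # csrsum: entry-wise sum of two CSR matrices
mask_vals = A[B.nonzero()]   # csrmask: read A's values at B's nonzeros
```

The [9000-9000-...], [9000-0-...], [0-9000-...], and [0-0-...] variants of test_csrmask sweep the nonzero counts of the two operands, including the empty-matrix corners.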
PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_propagate[idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype1-idtype1] Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype0-idtype0] Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] unstash PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype0-idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype1-idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype1] PASSED [ 4%] tests/compute/test_data.py::test_minigc PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[idtype0] PASSED [ 4%] tests/compute/test_data.py::test_gin PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype0-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype0-gs1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype1-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype1-gs1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[idtype1] PASSED [ 2%] 
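The "Submodule path '...' checked out '...'" lines interleaved through this stretch (tensorpipe and its vendored googletest/libnop/libuv/pybind11, thrust/cub, tvm and its 3rdparty deps, xbyak) come from a recursive submodule init in the concurrent build stage. Roughly the equivalent step, driven from Python for consistency with the other sketches here; the build script's exact invocation is not shown in this excerpt:

```python
import subprocess

# Recursively fetch and pin every submodule to the commit recorded in
# the superproject, matching the "checked out '<sha>'" lines in the log.
subprocess.run(
    ["git", "submodule", "update", "--init", "--recursive"],
    check=True,
)
```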
tests/compute/test_batched_heterograph.py::test_unbatch2[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_unbatch2[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[idtype0] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[idtype1] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_netypes PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype0-idtype0] [Pipeline] echo Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-gpu-linux [Pipeline] timeout Timeout set to expire in 30 min [Pipeline] { [Pipeline] sh PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype0-idtype1] + bash tests/scripts/task_unit_test.sh pytorch gpu PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype1-idtype0] Requirement already satisfied: pytest in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (7.1.2) Collecting psutil Downloading psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (281 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 281.3/281.3 kB 11.5 MB/s eta 0:00:00 Collecting pyyaml Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 596.3/596.3 kB 47.9 MB/s eta 0:00:00 PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype1-idtype1] Collecting pydantic Downloading pydantic-1.10.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (11.8 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 11.8/11.8 MB 118.6 MB/s eta 0:00:00 Requirement already satisfied: pandas in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (1.1.5) Collecting rdflib Downloading rdflib-6.2.0-py3-none-any.whl (500 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 500.3/500.3 kB 64.3 MB/s eta 0:00:00 Requirement already satisfied: ogb in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (1.3.3) Requirement already satisfied: pluggy<2.0,>=0.12 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (1.0.0) Requirement already satisfied: packaging in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (21.3) Requirement already satisfied: tomli>=1.0.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (2.0.1) Requirement already satisfied: py>=1.8.2 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (1.11.0) Requirement already satisfied: iniconfig in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (1.1.1) Requirement already satisfied: 
importlib-metadata>=0.12 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (4.12.0) Requirement already satisfied: attrs>=19.2.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (22.1.0) Requirement already satisfied: typing-extensions>=4.1.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pydantic) (4.3.0) Requirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pandas) (2.8.2) Requirement already satisfied: pytz>=2017.2 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pandas) (2022.2.1) Requirement already satisfied: numpy>=1.15.4 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pandas) (1.21.6) Requirement already satisfied: setuptools in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from rdflib) (61.2.0) Collecting isodate Downloading isodate-0.6.1-py2.py3-none-any.whl (41 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 41.7/41.7 kB 8.5 MB/s eta 0:00:00 Requirement already satisfied: pyparsing in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from rdflib) (3.0.9) Requirement already satisfied: tqdm>=4.29.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (4.64.0) Requirement already satisfied: outdated>=0.2.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (0.2.1) Requirement already satisfied: torch>=1.6.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.9.0+cu102) Requirement already satisfied: urllib3>=1.24.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.26.11) Requirement already satisfied: six>=1.12.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.16.0) Requirement already satisfied: scikit-learn>=0.20.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.0.2) Requirement already satisfied: zipp>=0.5 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from importlib-metadata>=0.12->pytest) (3.8.1) Requirement already satisfied: requests in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from outdated>=0.2.0->ogb) (2.28.1) Requirement already satisfied: littleutils in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from outdated>=0.2.0->ogb) (0.2.2) Requirement already satisfied: scipy>=1.1.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.7.3) Requirement already satisfied: joblib>=0.11 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.1.0) Requirement already satisfied: threadpoolctl>=2.0.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (3.1.0) Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2022.6.15) Requirement already satisfied: charset-normalizer<3,>=2 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2.1.0) Requirement already satisfied: idna<4,>=2.5 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (3.3) Installing collected packages: pyyaml, pydantic, psutil, isodate, rdflib PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype0] PASSED [ 3%] 
tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype0-idtype1] Successfully installed isodate-0.6.1 psutil-5.9.2 pydantic-1.10.2 pyyaml-6.0 rdflib-6.2.0 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype1-idtype1] ============================= test session starts ============================== platform linux -- Python 3.7.0, pytest-7.1.2, pluggy-1.0.0 -- /opt/conda/envs/pytorch-ci/bin/python3 cachedir: .pytest_cache rootdir: /root/jenkins/workspace/dgl_PR-4648 collecting ... PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype1-idtype1] PASSED [ 4%] tests/compute/test_data.py::test_minigc PASSED [ 4%] tests/compute/test_data.py::test_gin collected 2458 items tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[idtype0] PASSED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[idtype1] PASSED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[idtype0] PASSED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[idtype1] PASSED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype0] PASSED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype1] PASSED [ 0%] tests/compute/test_backend.py::test_set_default_backend PASSED [ 0%] 
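The session banner above shows the GPU unit-test stage driving pytest verbosely over tests/compute (2458 collected items) through `bash tests/scripts/task_unit_test.sh pytorch gpu`. Roughly the equivalent invocation from Python, as a sketch; the assumption that DGLBACKEND is how the script selects the framework follows from its "pytorch gpu" arguments, and the real environment handling lives in the script itself:

```python
import os
import pytest

# Assumed: DGLBACKEND picks the framework under test ("pytorch" here);
# device selection is handled by tests/scripts/task_unit_test.sh.
os.environ["DGLBACKEND"] = "pytorch"
raise SystemExit(pytest.main(["-v", "tests/compute"]))
```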
tests/compute/test_basics.py::test_compatible PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_getter[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_getter[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_update_routines[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_update_routines[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_update_all_0deg[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_update_all_0deg[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[idtype1] PASSED [ 0%] tests/compute/test_basics.py::test_dynamic_addition PASSED [ 0%] tests/compute/test_basics.py::test_repr[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_repr[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_local_var[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_local_var[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_issue_1088[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_issue_1088[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_issue_2484[idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_issue_2484[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch1[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch1[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_send_and_recv[idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_send_and_recv[idtype1] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_propagate[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_propagate[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[idtype1] 
PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype0-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype0-gs1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype1-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[idtype1-gs1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_unbatch2[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_unbatch2[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[idtype1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[idtype0] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[idtype1] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_netypes SKIPPED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype1-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-dtype1-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype0-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype0-idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype1-idtype0] PASSED [ 4%] 
tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-dtype1-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype0-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype0-idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype1-idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[dtype1-idtype1] PASSED [ 4%] tests/compute/test_data.py::test_minigc SKIPPED (Datasets don't need...) [ 4%] tests/compute/test_data.py::test_gin SKIPPED (Datasets don't need to...) [ 4%] tests/compute/test_data.py::test_fraud SKIPPED (Datasets don't need ...) [ 5%] tests/compute/test_data.py::test_fakenews SKIPPED (Datasets don't ne...) [ 5%] tests/compute/test_data.py::test_tudataset_regression SKIPPED (Datas...) [ 5%] tests/compute/test_data.py::test_data_hash SKIPPED (Datasets don't n...) [ 5%] tests/compute/test_data.py::test_citation_graph SKIPPED (Datasets do...) [ 5%] tests/compute/test_data.py::test_gnn_benchmark SKIPPED (Datasets don...) [ 5%] tests/compute/test_data.py::test_reddit SKIPPED (Datasets don't need...) [ 5%] tests/compute/test_data.py::test_explain_syn SKIPPED (Datasets don't...) [ 5%] tests/compute/test_data.py::test_wiki_cs SKIPPED (Datasets don't nee...) [ 5%] tests/compute/test_data.py::test_yelp SKIPPED (Dataset too large to ...) [ 5%] tests/compute/test_data.py::test_flickr SKIPPED (Datasets don't need...) [ 5%] tests/compute/test_data.py::test_extract_archive SKIPPED (Datasets d...) [ 5%] tests/compute/test_data.py::test_csvdataset SKIPPED (Datasets don't ...) [ 5%] tests/compute/test_data.py::test_add_nodepred_split SKIPPED (Dataset...) [ 5%] tests/compute/test_data.py::test_as_nodepred1 SKIPPED (Datasets don'...) [ 5%] tests/compute/test_data.py::test_as_nodepred2 SKIPPED (Datasets don'...) [ 5%] tests/compute/test_data.py::test_as_nodepred_ogb PASSED [ 5%] tests/compute/test_data.py::test_as_linkpred SKIPPED (Datasets don't...) [ 5%] tests/compute/test_data.py::test_as_linkpred_ogb PASSED [ 4%] tests/compute/test_data.py::test_fraud PASSED [ 5%] tests/compute/test_data.py::test_fakenews PASSED [ 5%] tests/compute/test_data.py::test_as_nodepred_csvdataset SKIPPED (Dat...) [ 5%] tests/compute/test_data.py::test_as_graphpred SKIPPED (Datasets don'...) 
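Most test_data.py cases above are SKIPPED with reasons like "Datasets don't need..." (pytest truncates the message in verbose mode): dataset download and parsing are covered elsewhere, not in this GPU unit-test stage. The skip mechanics, as a generic sketch; the flag name below is hypothetical, and the suite's actual condition and reason string live in its own decorators:

```python
import os
import pytest

# Hypothetical device flag, for illustration only.
ON_GPU_CI = os.getenv("DGL_TEST_DEVICE", "cpu") == "gpu"

@pytest.mark.skipif(ON_GPU_CI, reason="Datasets don't need GPU runs")
def test_some_dataset():
    ...
```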
[ 5%] tests/compute/test_data.py::test_as_graphpred_reprocess SKIPPED (Dat...) [ 5%] tests/compute/test_data.py::test_as_graphpred_ogb PASSED [ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[idtype0] PASSED [ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[idtype1] PASSED [ 5%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax_unidirectional PASSED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype0-src-g0] PASSED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype0-dst-g0] PASSED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype1-src-g0] PASSED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype1-dst-g0] PASSED [ 6%] tests/compute/test_ffi.py::test_cython PASSED [ 6%] tests/compute/test_ffi.py::test_callback[1] PASSED [ 6%] tests/compute/test_ffi.py::test_callback[2.3] PASSED [ 6%] tests/compute/test_ffi.py::test_callback_thread[1] PASSED [ 6%] tests/compute/test_ffi.py::test_callback_thread[2.3] PASSED [ 6%] tests/compute/test_filter.py::test_graph_filter PASSED [ 6%] tests/compute/test_filter.py::test_array_filter[idtype0] PASSED [ 6%] tests/compute/test_filter.py::test_array_filter[idtype1] PASSED [ 6%] tests/compute/test_frame.py::test_column_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_plain PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_dtype PASSED [ 6%] tests/compute/test_generators.py::test_rand_graph SKIPPED (GPU rando...) [ 6%] tests/compute/test_graph.py::test_query PASSED [ 6%] tests/compute/test_graph.py::test_mutation PASSED [ 6%] tests/compute/test_graph.py::test_scipy_adjmat PASSED [ 6%] tests/compute/test_graph.py::test_incmat PASSED [ 6%] tests/compute/test_graph.py::test_find_edges PASSED [ 6%] tests/compute/test_graph.py::test_ismultigraph PASSED [ 6%] tests/compute/test_graph.py::test_hypersparse_query PASSED [ 6%] tests/compute/test_graph.py::test_empty_data_initialized PASSED [ 7%] tests/compute/test_graph.py::test_is_sorted PASSED [ 7%] tests/compute/test_graph.py::test_default_types PASSED [ 7%] tests/compute/test_graph.py::test_formats PASSED [ 7%] tests/compute/test_heterograph.py::test_create[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_create[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_create2 PASSED [ 7%] tests/compute/test_heterograph.py::test_query[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_query[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_empty_query[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_empty_query[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_adj[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_adj[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_inc[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_inc[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_view[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_view[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_view1[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_view1[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_flatten[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_flatten[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device[idtype0] PASSED [ 7%] 
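Among the cases above, test_edge_softmax checks that a softmax over edge logits normalizes per destination node. The operation in miniature, using the public functional API rather than the heterograph-parametrized test fixtures:

```python
import torch
import dgl
from dgl.nn.functional import edge_softmax

g = dgl.graph(([0, 1, 2], [1, 1, 1]))   # three edges, one destination
logits = torch.randn(g.num_edges(), 1)
att = edge_softmax(g, logits)           # normalized over incoming edges
# All edges share destination node 1, so the weights sum to exactly 1.
assert torch.allclose(att.sum(), torch.tensor(1.0))
```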
tests/compute/test_heterograph.py::test_to_device[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device2[g0-idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device2[g0-idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_device2[g1-idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_device2[g1-idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_pin_memory_[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_pin_memory_[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert_bound[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert_bound[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo_zero_nodes[idtype0] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_to_homo_zero_nodes[idtype1] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_to_homo2[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo2[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_invertible_conversion[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_invertible_conversion[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_metagraph_reachable[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_metagraph_reachable[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph_mask[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph_mask[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_apply[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_apply[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_level2[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_level2[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_more_nnz[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_more_nnz[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_updates[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_updates[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_backward[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_backward[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_empty_heterograph[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_empty_heterograph[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_types_in_function[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_types_in_function[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[idtype1] PASSED [ 9%] 
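The long run of test_heterograph.py cases above covers construction, queries, views, device moves, conversion, and mutation of typed graphs. The object under test, in miniature:

```python
import dgl

# Two relations over two node types: a small typed graph.
g = dgl.heterograph({
    ("user", "follows", "user"): ([0, 1], [1, 2]),
    ("user", "plays", "game"): ([0, 1], [0, 1]),
})
assert g.num_nodes("user") == 3 and g.num_nodes("game") == 2
assert g.num_edges("follows") == 2
assert set(g.etypes) == {"follows", "plays"}
```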
tests/compute/test_heterograph.py::test_dtype_cast[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_dtype_cast[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_float_cast PASSED [ 9%] tests/compute/test_heterograph.py::test_format[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_format[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_edges_order[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_edges_order[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_reverse[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_reverse[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_clone[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_clone[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_edges[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_edges[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_nodes[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_nodes[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_edges[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_edges[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_nodes[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_nodes[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame_device[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame_device[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_create_block[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_create_block[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[coo-idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[coo-idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csr-idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csr-idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csc-idtype0] PASSED [ 11%] tests/compute/test_heterograph.py::test_adj_sparse[csc-idtype1] PASSED [ 11%] tests/compute/test_heterograph.py::test_forking_pickler PASSED [ 11%] tests/compute/test_index.py::test_dlpack PASSED [ 11%] tests/compute/test_kernel.py::test_copy_src_reduce PASSED [ 11%] tests/compute/test_kernel.py::test_copy_edge_reduce PASSED [ 11%] tests/compute/test_kernel.py::test_all_binary_builtins [ OK ] RowwiseTest.TestCOOSampling (73878 ms) [ RUN ] RowwiseTest.TestCOOSamplingUniform PASSED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-idtype0] PASSED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-idtype1] PASSED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[idtype0] PASSED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[idtype1] PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_id PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_remainder PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_remainder PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_range PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_range PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_support PASSED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype0] PASSED [ 11%] 
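test_kernel.py above (test_copy_src_reduce, test_copy_edge_reduce, test_all_binary_builtins) sweeps DGL's builtin message/reduce pairs; the [ RUN ]/[ OK ] RowwiseTest lines interleaved with it are the C++ gtest binary (build/runUnitTests) executing concurrently. One such builtin pair, as a sketch:

```python
import torch
import dgl
import dgl.function as fn

g = dgl.graph(([0, 1, 2], [1, 2, 0]))   # a 3-cycle: 1 in-edge per node
g.ndata["h"] = torch.ones(3, 2)
# copy_u + sum: the kind of message/reduce pair the kernel tests sweep.
g.update_all(fn.copy_u("h", "m"), fn.sum("m", "h_sum"))
assert torch.equal(g.ndata["h_sum"], torch.ones(3, 2))
```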
tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype1] PASSED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype0] PASSED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype1] PASSED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype0] PASSED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype1] PASSED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[idtype0] PASSED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_index PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph_index PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g0-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g0-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g1-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g1-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g5-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g5-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g6-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g6-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-idtype0] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g11-idtype0] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g11-idtype1] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g12-idtype0] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g12-idtype1] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_batched_heterograph SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_subgraph SKIPPED (GPU ed...) 
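test_pickle.py above round-trips graphs through pickle; most parametrized graph variants are SKIPPED on this GPU worker, but the is_pinned cases run. The invariant being checked, at its simplest:

```python
import pickle
import dgl

g = dgl.graph(([0, 1], [1, 2]))
g2 = pickle.loads(pickle.dumps(g))   # structure survives the round trip
assert g2.num_nodes() == g.num_nodes()
assert g2.num_edges() == g.num_edges()
```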
[ 13%] tests/compute/test_pickle.py::test_pickling_is_pinned[idtype0] PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_is_pinned[idtype1] PASSED [ 13%] tests/compute/test_pin_memory.py::test_pin_unpin PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[idtype0] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[idtype1] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[idtype0] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[idtype1] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype0] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype1] SKIPPED [ 13%] tests/compute/test_random.py::test_random_choice SKIPPED (GPU random...) [ 13%] tests/compute/test_readout.py::test_sum_case1[idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_sum_case1[idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-idtype1] PASSED [ 15%] 
tests/compute/test_readout.py::test_reduce_readout[mean-g2-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g2-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g3-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g3-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g4-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g4-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-idtype1] PASSED [ 16%] 
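The test_readout.py sweep running through this stretch (test_reduce_readout and test_weighted_reduce_readout) covers sum/max/mean graph-level readouts over seven graph fixtures and both idtypes. The reduction they validate, as a sketch:

```python
import torch
import dgl

g = dgl.graph(([0, 1, 2], [1, 2, 0]))
g.ndata["h"] = torch.tensor([[1.0], [2.0], [3.0]])
for op in ("sum", "max", "mean"):
    # Reduce the node feature "h" to one value per graph.
    print(op, dgl.readout_nodes(g, "h", op=op))
# Weighted variant: scale each node's feature by a node-weight field.
g.ndata["w"] = torch.tensor([[0.5], [0.25], [0.25]])
print("weighted sum", dgl.readout_nodes(g, "h", weight="w", op="sum"))
```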
tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g0-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g0-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g1-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g1-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g2-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g2-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g3-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g3-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g4-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g4-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g5-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g5-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g6-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g6-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g0-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g0-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g1-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g1-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g2-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g2-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-idtype0] [ OK ] RowwiseTest.TestCOOSamplingUniform (9886 ms) [ RUN ] RowwiseTest.TestCOOerEtypeSampling PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g4-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g4-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g5-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g5-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g0-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g0-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g1-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g1-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g2-idtype0] 
PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g2-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g3-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g3-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g4-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g4-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g5-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g5-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g6-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g6-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g1-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g1-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g2-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g2-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g3-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g3-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g4-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g4-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_edge_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_and_edge_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_node_and_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_frame[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_node_frame[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_edge_frame[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_edge_frame[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_issue1287[idtype0] PASSED [ 20%] tests/compute/test_removal.py::test_issue1287[idtype1] PASSED [ 20%] tests/compute/test_sampler.py::test_create_full PASSED [ 20%] tests/compute/test_sampler.py::test_1neighbor_sampler_all PASSED [ 20%] tests/compute/test_sampler.py::test_1neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_prefetch_neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_10neighbor_sampler_all PASSED [ 20%] tests/compute/test_sampler.py::test_10neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_layer_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_setseed PASSED [ 20%] tests/compute/test_sampler.py::test_negative_sampler PASSED [ 4%] 
tests/compute/test_data.py::test_fraud PASSED [ 4%] tests/compute/test_data.py::test_fakenews PASSED [ 5%] tests/compute/test_sampling.py::test_non_uniform_random_walk[True] PASSED [ 20%] tests/compute/test_sampling.py::test_non_uniform_random_walk[False] PASSED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[True] PASSED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[False] PASSED [ 20%] tests/compute/test_sampling.py::test_node2vec SKIPPED (GPU random wa...) [ 20%] tests/compute/test_sampling.py::test_pack_traces SKIPPED (GPU pack t...) [ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[True] PASSED [ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[False] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_noprob PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_prob PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_outedge PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk SKIPPED (...) [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk_outedge SKIPPED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_with_0deg PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_biased_homogeneous SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_biased_bipartite SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-coo] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-coo] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-coo] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-coo] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int64] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int64] PASSED [ 21%] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64] PASSED
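The test_sampling block that just finished covers random walks, fanout-limited neighbor sampling, and negative edge sampling. A minimal sketch of one call to each, assuming the stock dgl.sampling module; the toy graph, seeds, fanout, and walk length are illustrative:

    import dgl
    import torch

    # Hypothetical 4-node graph with a handful of directed edges.
    g = dgl.graph((torch.tensor([0, 0, 1, 2, 3]), torch.tensor([1, 2, 2, 3, 0])))

    # Sample at most 2 in-edges per seed (the call shape behind test_sample_neighbors_noprob).
    sub = dgl.sampling.sample_neighbors(g, torch.tensor([2, 3]), 2)

    # Uniform random walks of length 3 (the call shape behind test_uniform_random_walk).
    traces, types = dgl.sampling.random_walk(g, torch.tensor([0, 1]), length=3)

    # Up to 4 globally-uniform negative edges
    # (the call shape behind test_global_uniform_negative_sampling).
    neg_src, neg_dst = dgl.sampling.global_uniform_negative_sampling(g, 4)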
[ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_feature[True] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_feature[False] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_without_feature[True] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_without_feature[False] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_labels[True] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_labels[False] SKIPPED [ 22%] tests/compute/test_serialize.py::test_serialize_tensors PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_empty_dict PASSED [ 22%] tests/compute/test_serialize.py::test_load_old_files1 PASSED [ 22%] tests/compute/test_serialize.py::test_load_old_files2 PASSED [ 22%] tests/compute/test_serialize.py::test_deserialize_old_heterograph_file PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_heterograph SKIPPED [ 22%] tests/compute/test_serialize.py::test_serialize_heterograph_s3 SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_single_process[idtype0] PASSED [ 22%] tests/compute/test_shared_mem.py::test_single_process[idtype1] PASSED [ 22%] tests/compute/test_shared_mem.py::test_multi_process[idtype0] PASSED [ 22%] tests/compute/test_shared_mem.py::test_multi_process[idtype1] PASSED [ 22%] tests/compute/test_shared_mem.py::test_copy_from_gpu PASSED [ 22%] tests/compute/test_sort.py::test_sort_with_tag[idtype0] SKIPPED (GPU...) [ 22%] tests/compute/test_sort.py::test_sort_with_tag[idtype1] SKIPPED (GPU...) [ 22%] tests/compute/test_sort.py::test_sort_with_tag_bipartite[idtype0] SKIPPED [ 22%] tests/compute/test_sort.py::test_sort_with_tag_bipartite[idtype1] SKIPPED [ 22%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g0] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g1] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp4-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp4-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp5-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp5-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp0-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp0-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp1-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp1-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp2-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp2-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp4-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp4-g1] PASSED [ 23%] 
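The test_serialize cases above round-trip graphs (and optional graph-level labels) through DGL's binary format. A minimal sketch, assuming the stock dgl.save_graphs / dgl.load_graphs API; the path, feature name, and label key are illustrative:

    import os
    import tempfile

    import dgl
    import torch

    g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])))
    g.ndata['feat'] = torch.randn(3, 4)

    # Save a list of graphs plus a label dict, then load them back.
    path = os.path.join(tempfile.gettempdir(), 'graphs.bin')
    dgl.save_graphs(path, [g], labels={'glabel': torch.tensor([0])})
    loaded, label_dict = dgl.load_graphs(path)
    assert loaded[0].num_nodes() == g.num_nodes()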
tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp5-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp5-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp0-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp0-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp1-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp2-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp2-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp3-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp3-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp4-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp4-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp5-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp5-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp0-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp0-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp1-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp2-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp2-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp4-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp4-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp5-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp5-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp0-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp0-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp2-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp2-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp3-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp3-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp4-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp4-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp5-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp5-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp0-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp0-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp1-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp2-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp2-g1] 
PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp3-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp3-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp4-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp4-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp5-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp5-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp0-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp0-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp1-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp2-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp3-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp3-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp4-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp4-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp5-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp5-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp0-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp0-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp1-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp1-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp2-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp3-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp3-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp4-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp4-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp5-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp5-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp0-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp0-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp2-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp2-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp4-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp4-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp5-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp5-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp0-g1] PASSED [ 27%] 
tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp1-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp1-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp2-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp2-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp3-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp3-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp4-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp4-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp5-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp5-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp0-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp1-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp1-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp2-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp2-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp3-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp3-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp4-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp5-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp5-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp0-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp0-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp1-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp1-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp2-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp2-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp3-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp3-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp4-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp5-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp5-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp0-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp0-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp1-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp1-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp2-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp2-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp3-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp3-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp4-g0] PASSED [ 29%] 
tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp4-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp5-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp5-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp0-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp0-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp1-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp1-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp2-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp2-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp3-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp3-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp4-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp4-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp5-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp5-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp0-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp0-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp1-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp1-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp2-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp2-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp3-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp3-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp4-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp4-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp5-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp5-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp0-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp0-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp1-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp1-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp2-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp2-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp3-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp3-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp4-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp4-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp5-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp5-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp0-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp0-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp1-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp1-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp2-g0] PASSED [ 30%] 
tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp2-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp3-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp3-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp4-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp4-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp5-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp5-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp0-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp0-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp2-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp2-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp3-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp3-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp4-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp4-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp5-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp5-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp0-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp0-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp1-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp1-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp2-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp2-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp3-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp3-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp4-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp4-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp5-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp5-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp0-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp0-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp1-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp1-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp2-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp2-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp3-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp3-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp4-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp4-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp5-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp5-g1] PASSED [ 32%] 
tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp0-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp0-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp1-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp1-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp2-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp2-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp3-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp3-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp4-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp4-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp5-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp5-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp0-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp0-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp1-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp1-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp2-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp2-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp3-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp3-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp4-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp4-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp5-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp5-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp0-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp0-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp1-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp1-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp2-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp2-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp3-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp3-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp4-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp4-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp5-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp5-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp0-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp0-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp1-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp1-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp2-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp2-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp3-g0] PASSED [ 34%] 
tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp3-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp4-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp4-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp5-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp5-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp0-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp0-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp1-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp1-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp2-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp2-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp3-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp3-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp4-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp4-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp5-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp5-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp0-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp0-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp1-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp1-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp2-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp2-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp3-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp3-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp4-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp4-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp5-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp5-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp0-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp0-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp1-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp1-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp2-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp2-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp3-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp3-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp4-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp4-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp5-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp5-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp0-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp0-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp1-g0] PASSED [ 36%] 
tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp1-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp2-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp2-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp3-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp3-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp4-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp4-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp5-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp5-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp0-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp0-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp1-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp1-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp2-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp2-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp3-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp3-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp4-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp4-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp5-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp5-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp0-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp0-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp1-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp1-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp2-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp3-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp3-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp4-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp4-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp5-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp5-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp0-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp0-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp1-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp1-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp2-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp3-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp3-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp4-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp4-g1] PASSED [ 37%] 
tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp5-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp5-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp0-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp0-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp1-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp1-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp2-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp2-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp3-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp3-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp4-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp4-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp5-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp5-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp0-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp0-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp1-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp1-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp2-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp2-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp3-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp3-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp4-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp4-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp5-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp5-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp0-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp0-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp1-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp1-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp2-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp2-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp3-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp3-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp4-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp4-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp5-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp5-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp0-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp0-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp1-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp1-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp2-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp2-g1] PASSED [ 39%] 
tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp3-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp3-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp4-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp4-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp5-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp5-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp0-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp1-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp1-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp2-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp2-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp3-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp3-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp4-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp4-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp5-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp5-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp0-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp1-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp1-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp2-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp2-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp3-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp3-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp4-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp4-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp0-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp1-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp1-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp3-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp4-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp0-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp1-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp1-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp2-g0] PASSED [ 41%] 
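The test_spmm sweep above crosses every reducer (sum, min, max) with every message op (add, sub, mul, div, copy_lhs, copy_rhs), feature shape, and index dtype; the test_sddmm cases that begin here do the same for binary ops crossed with operand targets (u, v, e). A minimal sketch of one point from each sweep, assuming the gspmm/gsddmm entry points in dgl.ops keep the signatures shown below; the toy ring and feature shapes are illustrative:

    import dgl
    import torch
    from dgl.ops import gspmm, gsddmm

    # Hypothetical 3-node ring.
    g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
    u_feat = torch.randn(3, 8)   # source-node ("u") features
    v_feat = torch.randn(3, 8)   # destination-node ("v") features
    e_feat = torch.randn(3, 8)   # per-edge features

    # SpMM: multiply source features by edge features, then sum over in-edges
    # (one point of the test_spmm[...-sum-mul-...] sweep).
    node_out = gspmm(g, 'mul', 'sum', u_feat, e_feat)

    # SDDMM: add source and destination features per edge
    # (one point of the test_sddmm[...-add-u-v-...] sweep).
    edge_out = gsddmm(g, 'add', u_feat, v_feat, lhs_target='u', rhs_target='v')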
tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp3-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp4-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp0-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp1-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp1-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp2-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp2-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp3-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp2-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp2-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp3-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp3-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp4-g0] PASSED [ 43%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp4-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp3-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp4-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp4-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp1-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp2-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp1-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp2-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp1-g0] PASSED [ 45%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp2-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp3-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp3-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp4-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp4-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp0-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp1-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp2-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp3-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp3-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp4-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp4-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp0-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp1-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp3-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp0-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp0-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp1-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp1-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp2-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp3-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp0-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp0-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp1-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp1-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp2-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp3-g0] PASSED [ 46%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp2-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp2-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp3-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp3-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp4-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp4-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp2-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp2-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp3-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp3-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp4-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp4-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp2-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp2-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp3-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp3-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp4-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp0-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp0-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp1-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp1-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp2-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp2-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp3-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp3-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp4-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp0-g0] PASSED [ 48%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp0-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp1-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp1-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp2-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp2-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp3-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp3-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp4-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp0-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp0-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp1-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp2-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp2-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp3-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp3-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp4-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp4-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp0-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp0-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp1-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp2-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp2-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp3-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp3-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp4-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp4-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp0-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp0-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp1-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp2-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp2-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp3-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp4-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp4-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp0-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp0-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp1-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp1-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp2-g0] PASSED [ 50%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp2-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp3-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp4-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp4-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp0-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp0-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp1-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp1-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp2-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp2-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp3-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp4-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp4-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp0-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp1-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp2-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp2-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp3-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp3-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp4-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp4-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp0-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp1-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp2-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp2-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp3-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp3-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp4-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp4-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp0-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp1-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp2-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp2-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp3-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp4-g0] PASSED [ 52%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp4-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp0-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp0-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp1-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp1-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp2-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp2-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp3-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp4-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp4-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp0-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp0-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp1-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp1-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp2-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp2-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp3-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp4-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp4-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp0-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp1-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp2-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp2-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp3-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp3-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp4-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp4-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp0-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp1-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp2-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp2-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp3-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp3-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp4-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp4-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp0-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp1-g0] PASSED [ 54%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp1-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp3-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp3-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp4-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp4-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp0-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp0-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp1-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp1-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp3-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp3-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp4-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp4-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp0-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp0-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp1-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp1-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp3-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp3-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp0-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp1-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp1-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp2-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp2-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp3-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp3-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp0-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp1-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp1-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp2-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp2-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp3-g0] PASSED [ 55%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp3-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp0-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp1-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp3-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp3-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp4-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp4-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp0-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp0-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp1-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp3-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp3-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp4-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp4-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp0-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp0-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp1-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp3-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp3-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp0-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp0-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp1-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp1-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp2-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp2-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp3-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp3-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp0-g0] PASSED [ 57%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp0-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp1-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp1-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp2-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp2-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp3-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp3-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp0-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp2-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp3-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp3-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp4-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp4-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp0-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp2-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp3-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp3-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp4-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp4-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp0-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp4-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp0-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp0-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp1-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp1-g1] PASSED [ 59%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp2-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp4-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp0-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp0-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp1-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp1-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp2-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp4-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp0-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp2-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp2-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp3-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp3-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp4-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp4-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp0-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp2-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp2-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp3-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp3-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp4-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp4-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp0-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp2-g0] PASSED [ 61%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp2-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp4-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp4-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp0-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp0-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp1-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp1-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp2-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp2-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp4-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp4-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp0-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp0-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp1-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp1-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp2-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp2-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp2-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp2-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp3-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp3-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp2-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp2-g1] PASSED [ 62%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp3-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp3-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp2-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp3-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp3-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp1-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp2-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp3-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp3-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp2-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp3-g0] PASSED [ 64%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp2-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp3-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp3-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp4-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp4-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp0-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp0-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp3-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp4-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp4-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp0-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp0-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp3-g1] PASSED [ 66%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp0-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp1-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp1-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp2-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp3-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp0-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp1-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp1-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp2-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp3-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp4-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp4-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp0-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp4-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp4-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp0-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp0-g1] PASSED [ 67%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp0-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp1-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp1-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp2-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp2-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp0-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp1-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp1-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp2-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp2-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp2-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp3-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp3-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp4-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp4-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp0-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp2-g1] PASSED [ 69%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp3-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp3-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp4-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp4-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp0-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp2-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp3-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp4-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp0-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp0-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp1-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp1-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp2-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp2-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp3-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp4-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp0-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp0-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp1-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp1-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp2-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp2-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp3-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp4-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp0-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp2-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp2-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp3-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp3-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp4-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp4-g1] PASSED [ 71%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp0-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp2-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp2-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp3-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp3-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp4-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp4-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp0-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp2-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp2-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp4-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp4-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp0-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp0-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp1-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp1-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp2-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp2-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp4-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp4-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp0-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp0-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp1-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp1-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp2-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp2-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp4-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp1-g1] PASSED [ 73%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp2-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp2-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp3-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp3-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp4-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp1-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp2-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp2-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp3-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp3-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp4-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp1-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp2-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp3-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp3-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp4-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp4-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp0-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp0-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp1-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp1-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp2-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp3-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp3-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp4-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp4-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp0-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp0-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp1-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp1-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp2-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp3-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp3-g1] PASSED [ 74%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp4-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp1-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp2-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp2-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp3-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp3-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp4-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp1-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp2-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp2-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp3-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp3-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp4-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp1-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp3-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp4-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp4-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp0-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp0-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp1-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp1-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp3-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp4-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp4-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp0-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp0-g1] PASSED [ 76%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp1-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp1-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp1-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp1-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp2-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp2-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp3-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp1-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp1-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp2-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp2-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp3-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp1-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp2-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp3-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp3-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp4-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp4-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp0-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp0-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp1-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp2-g1] PASSED [ 78%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp3-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp3-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp4-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp4-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp0-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp0-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp1-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp2-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp3-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp3-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp4-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp4-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp0-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp0-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp1-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp1-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp2-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp2-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp3-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp3-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp4-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp4-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp0-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp0-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp1-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp1-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp2-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp2-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp3-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp3-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp4-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp4-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp0-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp2-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp3-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp3-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp4-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp4-g1] PASSED [ 80%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp0-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp2-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp3-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp3-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp4-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp4-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp0-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp2-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp3-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp4-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp4-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp0-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp0-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp1-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp1-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp2-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp2-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp3-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp1-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp1-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp2-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp2-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp3-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp4-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp4-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp0-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp1-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp1-g1] PASSED [ 82%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp2-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp3-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp3-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp4-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp4-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp0-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp1-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp1-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp2-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp3-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp3-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp4-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp4-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp0-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp1-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp1-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp2-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp3-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp3-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp4-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp4-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp0-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp0-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp1-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp1-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp2-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp2-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp3-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp3-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp4-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp4-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp0-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp0-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp1-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp1-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp2-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp2-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp3-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp3-g1] PASSED [ 83%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp4-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp2-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp2-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp3-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp3-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp4-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp2-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp2-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp3-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp3-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp4-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp2-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp2-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp3-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp3-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp4-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp4-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp0-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp0-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp1-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp1-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp2-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp2-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp3-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp3-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp4-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp4-g1] PASSED [ 85%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp0-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp0-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp1-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp1-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp2-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp2-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp3-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp3-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp0-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp1-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp2-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp2-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp3-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp3-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp0-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp1-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp2-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp2-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp3-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp3-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp0-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp1-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp4-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp4-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp0-g0] PASSED [ 87%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp0-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp1-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp1-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp4-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp4-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp0-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp0-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp1-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp1-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp1-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp2-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp2-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp3-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp3-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp2-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp2-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp3-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp3-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g1] PASSED [ 88%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp3-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp0-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp0-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g0] PASSED [ 90%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp0-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp0-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp1-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp1-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp2-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp2-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp3-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp3-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp4-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp4-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-src-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-dst-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-src-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-dst-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-src-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-dst-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-src-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-dst-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-src-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-dst-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-src-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-dst-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[sum] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[max] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[min] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[mean] 
PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-idtype0] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-idtype1] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-idtype0] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-idtype1] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-idtype0] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-idtype1] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-idtype0] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-idtype1] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-idtype0] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-idtype1] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-idtype0] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-idtype1] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-idtype0] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-idtype1] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-idtype0] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-idtype1] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-idtype0] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-idtype1] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-idtype0] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-idtype1] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-idtype0] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-idtype1] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-idtype0] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-idtype1] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-idtype0] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-idtype1] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-idtype0] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-idtype1] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-idtype0] PASSED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-idtype1] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-1] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-8] 
PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-16] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-64] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-256] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-1] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-8] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-16] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-64] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-256] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-1] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-8] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-16] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-64] PASSED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-256] PASSED [ 94%] tests/compute/test_sparse.py::test_use_libxsmm_switch SKIPPED (Libxs...) [ 94%] tests/compute/test_specialization.py::test_v2v_update_all[idtype0] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_update_all[idtype1] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[idtype0] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[idtype1] PASSED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[idtype0] PASSED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[idtype1] PASSED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype0] PASSED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype1] PASSED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[idtype0] PASSED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[idtype1] PASSED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[idtype0] PASSED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_edge_subgraph PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph1[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph1[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_in_subgraph[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_in_subgraph[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_message_passing PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype0] PASSED [ 95%] 
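The test_sddmm, test_segment_reduce, test_segment_mm and test_gather_mm_idx_b runs above exercise DGL's generalized sparse kernels. A minimal sketch of what they compute, assuming the dgl.ops signatures of the 0.9 line; the toy graph and feature shapes below are made up for illustration, not taken from the test suite:

    import dgl
    import torch as th

    g = dgl.graph((th.tensor([0, 1, 2]), th.tensor([1, 2, 0])))
    u_feat = th.randn(g.num_nodes(), 8)
    v_feat = th.randn(g.num_nodes(), 8)

    # SDDMM: one output per edge from node/edge operands, e.g. the 'dot'
    # variant that appears in the parametrized test IDs above.
    edge_scores = dgl.ops.gsddmm(g, 'dot', u_feat, v_feat,
                                 lhs_target='u', rhs_target='v')

    # edge_softmax: normalize per-edge logits over each destination node.
    attn = dgl.ops.edge_softmax(g, th.randn(g.num_edges(), 1))

    # segment_reduce: reduce a value tensor over variable-length segments.
    seglen = th.tensor([2, 1])
    reduced = dgl.ops.segment_reduce(seglen, th.randn(3, 4), reducer='sum')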
tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device0] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device1] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device2] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device3] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device0] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device1] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device2] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device3] PASSED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype0-device0] PASSED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype0-device1] PASSED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype1-device0] PASSED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype1-device1] PASSED [ 96%] tests/compute/test_transform.py::test_line_graph1 PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[idtype0] PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[idtype1] PASSED [ 96%] tests/compute/test_transform.py::test_no_backtracking PASSED [ 96%] tests/compute/test_transform.py::test_reverse[idtype0] PASSED [ 96%] tests/compute/test_transform.py::test_reverse[idtype1] PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[idtype0] PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[idtype1] PASSED [ 96%] tests/compute/test_transform.py::test_to_bidirected SKIPPED (GPU not...) [ 96%] tests/compute/test_transform.py::test_add_reverse_edges PASSED [ 96%] tests/compute/test_transform.py::test_simple_graph SKIPPED (GPU not ...) [ 96%] tests/compute/test_transform.py::test_khop_graph SKIPPED (GPU not im...) [ 96%] tests/compute/test_transform.py::test_khop_adj SKIPPED (GPU not impl...) [ 96%] tests/compute/test_transform.py::test_laplacian_lambda_max SKIPPED (...) [ 97%] tests/compute/test_transform.py::test_partition_with_halo SKIPPED (G...) [ 97%] tests/compute/test_transform.py::test_metis_partition[idtype0] SKIPPED [ 97%] tests/compute/test_transform.py::test_metis_partition[idtype1] SKIPPED [ 97%] tests/compute/test_transform.py::test_reorder_nodes SKIPPED (It does...) [ 97%] tests/compute/test_transform.py::test_compact[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_compact[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_to_simple[idtype0] SKIPPED (GP...) [ 97%] tests/compute/test_transform.py::test_to_simple[idtype1] SKIPPED (GP...) 
[ 97%] tests/compute/test_transform.py::test_to_block[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_to_block[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_edges[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_remove_edges[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_add_edges[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_add_edges[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_nodes[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_remove_nodes[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_add_selfloop[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_add_selfloop[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_graph[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_norm_by_dst[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_norm_by_dst[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_self_loop[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_self_loop[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_to_simple[idtype0] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_to_simple[idtype1] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_line_graph[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_line_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_khop_graph[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_khop_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_gcnnorm[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_gcnnorm[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_ppr[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_ppr[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_gdc[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_gdc[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_node_shuffle[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_node_shuffle[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_drop_node[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_drop_node[idtype1] PASSED [ 99%] 
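The test_module_* cases above cover dgl.transforms, which packages graph rewrites as callable modules. A hedged sketch of how such modules compose, with module names read off the test IDs and a made-up toy graph:

    import dgl
    import torch as th

    g = dgl.graph((th.tensor([0, 1]), th.tensor([1, 2])))
    transform = dgl.transforms.Compose([
        dgl.transforms.AddReverse(),   # cf. test_module_add_reverse
        dgl.transforms.AddSelfLoop(),  # cf. test_module_add_self_loop
    ])
    g2 = transform(g)  # returns a new, transformed graph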
tests/compute/test_transform.py::test_module_drop_edge[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_drop_edge[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_add_edge[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_add_edge[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_sign[g0] PASSED [ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[idtype1] PASSED [ 99%] tests/compute/test_traversal.py::test_bfs[idtype0] PASSED [ 99%] tests/compute/test_traversal.py::test_bfs[idtype1] PASSED [ 99%] tests/compute/test_traversal.py::test_topological_nodes[idtype0] PASSED [ 99%] tests/compute/test_traversal.py::test_topological_nodes[idtype1] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[idtype0] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[idtype1] PASSED [100%]
=============================== warnings summary ===============================
python/dgl/backend/backend.py:1741
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/backend.py:1741: DeprecationWarning: invalid escape sequence \P
    """
python/dgl/backend/pytorch/tensor.py:16
python/dgl/backend/pytorch/tensor.py:16
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:16: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(th.__version__) < LooseVersion("1.9.0"):
python/dgl/backend/pytorch/tensor.py:340
python/dgl/backend/pytorch/tensor.py:340
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:340: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(th.__version__) >= LooseVersion("1.10.0"):
python/dgl/dataloading/dataloader.py:33
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/dataloading/dataloader.py:33: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_VER = LooseVersion(torch.__version__)
python/dgl/_dataloading/pytorch/dataloader.py:23
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:23: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_VER = LooseVersion(th.__version__)
python/dgl/_dataloading/pytorch/dataloader.py:24
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:24: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_16 = PYTORCH_VER >= LooseVersion("1.6.0")
python/dgl/_dataloading/pytorch/dataloader.py:25
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:25: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_17 = PYTORCH_VER >= LooseVersion("1.7.0")
../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _nlv = LooseVersion(_np_version)
../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p16 = _nlv < LooseVersion("1.16")
../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p17 = _nlv < LooseVersion("1.17")
../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p18 = _nlv < LooseVersion("1.18")
../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p19 = _nlv < LooseVersion("1.19")
../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p20 = _nlv < LooseVersion("1.20")
../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    other = LooseVersion(other)
../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125
../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(_np_version) >= LooseVersion("1.17.0"):
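Every distutils deprecation above comes from a LooseVersion comparison, and the warning text itself names the replacement, packaging.version. A minimal migration sketch, assuming the packaging distribution is available in the environment (this is not the actual DGL patch):

    import torch as th
    from packaging.version import parse as parse_version  # stands in for LooseVersion

    PYTORCH_VER = parse_version(th.__version__)
    PYTORCH_16 = PYTORCH_VER >= parse_version("1.6.0")
    PYTORCH_17 = PYTORCH_VER >= parse_version("1.7.0")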
tests/compute/test_basics.py: 2 warnings
tests/compute/test_filter.py: 1 warning
tests/compute/test_graph.py: 9 warnings
tests/compute/test_kernel.py: 3 warnings
tests/compute/test_removal.py: 16 warnings
tests/compute/test_specialization.py: 12 warnings
tests/compute/test_subgraph.py: 2 warnings
tests/compute/test_transform.py: 6 warnings
tests/compute/test_traversal.py: 2 warnings
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
    dgl_warning('Recommend creating graphs by `dgl.graph(data)`'
tests/compute/test_basics.py: 2 warnings
tests/compute/test_batched_graph.py: 10 warnings
tests/compute/test_graph.py: 2 warnings
tests/compute/test_kernel.py: 1 warning
tests/compute/test_removal.py: 10 warnings
tests/compute/test_specialization.py: 10 warnings
tests/compute/test_subgraph.py: 2 warnings
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:354: DGLWarning: DGLGraph.add_edge is deprecated. Please use DGLGraph.add_edges
    dgl_warning("DGLGraph.add_edge is deprecated. Please use DGLGraph.add_edges")
tests/compute/test_basics.py::test_update_all_0deg[idtype0]
tests/compute/test_basics.py::test_update_all_0deg[idtype1]
tests/compute/test_basics.py::test_pull_0deg[idtype0]
tests/compute/test_basics.py::test_pull_0deg[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/core.py:79: DGLWarning: The input graph for the user-defined edge function does not contain valid edges
    dgl_warning('The input graph for the user-defined edge function ' \
tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype0]
tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype1]
tests/compute/test_graph.py::test_query
tests/compute/test_graph.py::test_hypersparse_query
tests/compute/test_transform.py::test_no_backtracking
tests/compute/test_transform.py::test_reverse[idtype0]
tests/compute/test_transform.py::test_reverse[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2978: DGLWarning: DGLGraph.edge_id is deprecated. Please use DGLGraph.edge_ids.
    dgl_warning("DGLGraph.edge_id is deprecated. Please use DGLGraph.edge_ids.")
tests/compute/test_batched_heterograph.py::test_features[idtype0]
tests/compute/test_batched_heterograph.py::test_features[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/batch.py:159: DGLWarning: Arguments edge_attrs has been deprecated. Please use edata instead.
    dgl_warning('Arguments edge_attrs has been deprecated. Please use'
tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype0]
tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype1]
tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype0]
tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype1]
tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype0]
tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype1]
tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype0]
tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph_index.py:797: FutureWarning: Adjacency matrix by default currently returns edge IDs. As a result there is one 0 entry which is not eliminated. In the next release it will return 1s by default, and 0 will be eliminated otherwise.
    FutureWarning)
tests/compute/test_graph.py::test_query
tests/compute/test_graph.py::test_hypersparse_query
tests/compute/test_heterograph.py::test_query[idtype0]
tests/compute/test_heterograph.py::test_query[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2753: DGLWarning: DGLGraph.has_node is deprecated. Please use DGLGraph.has_nodes
    dgl_warning("DGLGraph.has_node is deprecated. Please use DGLGraph.has_nodes")
tests/compute/test_graph.py::test_query
tests/compute/test_graph.py::test_hypersparse_query
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2687: DGLWarning: DGLGraph.__contains__ is deprecated. Please directly call has_nodes.
    dgl_warning('DGLGraph.__contains__ is deprecated.'
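The DGLWarnings above all point from a deprecated singular API to its v0.5+ replacement. A short sketch of the recommended forms on a made-up graph:

    import dgl
    import torch as th

    # dgl.graph(data) instead of the deprecated dgl.DGLGraph(data)
    g = dgl.graph((th.tensor([0, 1, 2]), th.tensor([1, 2, 3])))

    # batched DGLGraph.add_edges instead of the deprecated DGLGraph.add_edge
    g.add_edges(th.tensor([3]), th.tensor([0]))

    # plural query forms: edge_ids / has_nodes instead of edge_id / has_node
    eids = g.edge_ids(th.tensor([0, 1]), th.tensor([1, 2]))
    present = g.has_nodes(th.tensor([0, 3]))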
tests/compute/test_graph.py::test_query
tests/compute/test_sampling.py::test_non_uniform_random_walk[True]
tests/compute/test_sampling.py::test_non_uniform_random_walk[False]
tests/compute/test_sampling.py::test_uniform_random_walk[True]
tests/compute/test_sampling.py::test_uniform_random_walk[False]
tests/compute/test_transform.py::test_no_backtracking
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2851: DGLWarning: DGLGraph.has_edge_between is deprecated. Please use DGLGraph.has_edges_between
    dgl_warning("DGLGraph.has_edge_between is deprecated. "
tests/compute/test_graph.py::test_query
tests/compute/test_graph.py::test_hypersparse_query
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:3432: DGLWarning: DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees
    dgl_warning("DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees")
tests/compute/test_graph.py::test_query
tests/compute/test_graph.py::test_hypersparse_query
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:3516: DGLWarning: DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees
    dgl_warning("DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees")
tests/compute/test_graph.py::test_query
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly', 'sort_csr'] are deprecated in v0.5, and can be safely removed in all cases.
    ' removed in all cases.'.format(list(deprecate_kwargs.keys())))
tests/compute/test_heterograph.py: 20 warnings
  /root/jenkins/workspace/dgl_PR-4648/tests/compute/test_heterograph.py:1128: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead
    assert np.asscalar(F.asnumpy(src_i)) == nid[src[i]]
tests/compute/test_heterograph.py: 20 warnings
  /root/jenkins/workspace/dgl_PR-4648/tests/compute/test_heterograph.py:1129: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead
    assert np.asscalar(F.asnumpy(dst_i)) == nid[dst[i]]
tests/compute/test_heterograph.py::test_invertible_conversion[idtype0]
tests/compute/test_heterograph.py::test_invertible_conversion[idtype1]
tests/compute/test_shared_mem.py::test_single_process[idtype0]
tests/compute/test_shared_mem.py::test_single_process[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2635: DGLWarning: DGLGraph.is_readonly is deprecated in v0.5. DGLGraph now always supports mutable operations like add_nodes and add_edges.
    dgl_warning('DGLGraph.is_readonly is deprecated in v0.5.\n'
tests/compute/test_heterograph.py::test_types_in_function[idtype0]
tests/compute/test_heterograph.py::test_types_in_function[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:277: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
    mask = th.tensor(mask, dtype=th.bool)
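The NumPy and PyTorch warnings above each spell out their preferred idiom; a combined sketch with made-up values:

    import numpy as np
    import torch as th

    x = np.array([1.5], dtype=np.float32)
    val = x.item()  # instead of the deprecated np.asscalar(x)

    mask = th.tensor([1, 0, 1])
    bool_mask = mask.clone().detach().to(th.bool)  # instead of th.tensor(mask, dtype=th.bool)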
tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype0]
tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:569: UserWarning: The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad attribute won't be populated during autograd.backward(). If you indeed want the gradient for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. If you access the non-leaf Tensor by mistake, make sure you access the leaf Tensor instead. See github.com/pytorch/pytorch/pull/30531 for more information.
    return x.grad
tests/compute/test_partition.py::test_get_node_partition_from_book[idtype0]
tests/compute/test_partition.py::test_get_node_partition_from_book[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
    "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
tests/compute/test_sampler.py::test_create_full
tests/compute/test_sampler.py::test_1neighbor_sampler_all
tests/compute/test_sampler.py::test_1neighbor_sampler
tests/compute/test_sampler.py::test_prefetch_neighbor_sampler
tests/compute/test_sampler.py::test_10neighbor_sampler_all
tests/compute/test_sampler.py::test_10neighbor_sampler
tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler
tests/compute/test_sampler.py::test_setseed
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/contrib/sampling/sampler.py:317: DGLWarning: dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5. Please read our guide for how to use the new sampling APIs.
    dgl_warning('dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5.'
tests/compute/test_sampler.py::test_create_full
tests/compute/test_sampler.py::test_1neighbor_sampler_all
tests/compute/test_sampler.py::test_1neighbor_sampler
tests/compute/test_sampler.py::test_prefetch_neighbor_sampler
tests/compute/test_sampler.py::test_10neighbor_sampler_all
tests/compute/test_sampler.py::test_10neighbor_sampler
tests/compute/test_sampler.py::test_layer_sampler
tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler
tests/compute/test_sampler.py::test_setseed
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/_deprecate/nodeflow.py:99: DGLWarning: NodeFlow APIs are deprecated starting from v0.5. Please read our guide for how to use the new sampling APIs.
    dgl_warning('NodeFlow APIs are deprecated starting from v0.5. Please read our'
tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32]
tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/sampling/negative.py:102: ComplexWarning: Casting complex values to real discards the imaginary part
    g._graph, etype_id, num_samples, 3, exclude_self_loops, replace, redundancy)
tests/compute/test_serialize.py::test_load_old_files1
tests/compute/test_serialize.py::test_load_old_files2
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/data/graph_serialize.py:179: DGLWarning: You are loading a graph file saved by old version of dgl. Please consider saving it again with the current format.
    Please consider saving it again with the current format.")
tests/compute/test_sparse.py: 80 warnings
  /root/jenkins/workspace/dgl_PR-4648/tests/compute/test_sparse.py:299: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \
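The .grad UserWarning above concerns reading gradients off a non-leaf tensor; calling retain_grad() before backward() makes that read meaningful. A toy reproduction of both behaviors:

    import torch as th

    w = th.randn(3, requires_grad=True)
    y = w * 2            # y is a non-leaf tensor
    y.retain_grad()      # without this, y.grad stays None and triggers the warning
    y.sum().backward()
    print(y.grad)        # populated thanks to retain_grad()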
tests/compute/test_sparse.py: 40 warnings
  /root/jenkins/workspace/dgl_PR-4648/tests/compute/test_sparse.py:339: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \
tests/compute/test_transform.py::test_reverse_shared_frames[idtype0]
tests/compute/test_transform.py::test_reverse_shared_frames[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/transforms/functional.py:1267: DGLWarning: share_ndata argument has been renamed to copy_ndata.
    dgl_warning('share_ndata argument has been renamed to copy_ndata.')
tests/compute/test_transform.py::test_reverse_shared_frames[idtype0]
tests/compute/test_transform.py::test_reverse_shared_frames[idtype1]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/transforms/functional.py:1270: DGLWarning: share_edata argument has been renamed to copy_edata.
    dgl_warning('share_edata argument has been renamed to copy_edata.')
tests/compute/test_transform.py::test_module_random_walk_pe[idtype0]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:44: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at /pytorch/torch/csrc/utils/tensor_numpy.cpp:180.)
    return th.as_tensor(data, dtype=dtype)
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
-- generated xml file: /root/jenkins/workspace/dgl_PR-4648/pytest_compute.xml --
============================ slowest 100 durations =============================
23.31s call tests/compute/test_data.py::test_as_graphpred_ogb 11.00s call tests/compute/test_data.py::test_as_linkpred_ogb 6.86s call tests/compute/test_data.py::test_as_nodepred_ogb 6.60s call tests/compute/test_sampler.py::test_negative_sampler 3.20s call tests/compute/test_kernel.py::test_all_binary_builtins 1.28s call tests/compute/test_heterograph.py::test_forking_pickler 0.67s call tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[idtype0] 0.38s call tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype0] 0.28s call tests/compute/test_sampling.py::test_non_uniform_random_walk[False] 0.26s call tests/compute/test_graph.py::test_query 0.19s call tests/compute/test_heterograph.py::test_query[idtype0] 0.18s call tests/compute/test_heterograph.py::test_query[idtype1] 0.18s call tests/compute/test_sampling.py::test_sample_neighbors_outedge 0.18s call tests/compute/test_sampling.py::test_uniform_random_walk[False] 0.17s call tests/compute/test_sampling.py::test_non_uniform_random_walk[True] 0.15s call tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype1] 0.15s call tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype0] 0.15s call tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype1] 0.12s call tests/compute/test_sampling.py::test_uniform_random_walk[True] 0.12s call tests/compute/test_kernel.py::test_copy_edge_reduce 0.12s call tests/compute/test_shared_mem.py::test_multi_process[idtype0] 0.11s call tests/compute/test_shared_mem.py::test_copy_from_gpu 0.11s call tests/compute/test_kernel.py::test_copy_src_reduce 0.11s call tests/compute/test_shared_mem.py::test_multi_process[idtype1] 0.10s call tests/compute/test_sampling.py::test_sample_neighbors_prob 0.10s call tests/compute/test_sampling.py::test_sample_neighbors_noprob 0.08s call tests/compute/test_heterograph.py::test_view1[idtype0] 0.08s call
tests/compute/test_heterograph.py::test_view1[idtype1] 0.06s call tests/compute/test_heterograph.py::test_updates[idtype0] 0.06s call tests/compute/test_heterograph.py::test_updates[idtype1] 0.06s call tests/compute/test_sampler.py::test_prefetch_neighbor_sampler 0.06s call tests/compute/test_nccl.py::test_nccl_sparse_push_single_remainder 0.05s call tests/compute/test_traversal.py::test_bfs[idtype0] 0.05s call tests/compute/test_removal.py::test_node_and_edge_removal[idtype0] 0.05s call tests/compute/test_removal.py::test_node_and_edge_removal[idtype1] 0.05s call tests/compute/test_sampler.py::test_1neighbor_sampler 0.05s call tests/compute/test_heterograph.py::test_level2[idtype0] 0.04s call tests/compute/test_traversal.py::test_bfs[idtype1] 0.04s call tests/compute/test_heterograph.py::test_level2[idtype1] 0.04s call tests/compute/test_transform.py::test_remove_nodes[idtype0] 0.04s call tests/compute/test_specialization.py::test_v2v_snr[idtype0] 0.04s call tests/compute/test_transform.py::test_remove_nodes[idtype1] 0.04s call tests/compute/test_specialization.py::test_v2v_snr[idtype1] 0.04s call tests/compute/test_sampler.py::test_10neighbor_sampler 0.04s call tests/compute/test_nccl.py::test_nccl_sparse_pull_single_remainder 0.04s call tests/compute/test_specialization.py::test_v2v_pull[idtype0] 0.03s call tests/compute/test_sampler.py::test_1neighbor_sampler_all 0.03s call tests/compute/test_nccl.py::test_nccl_sparse_pull_single_range 0.03s call tests/compute/test_nccl.py::test_nccl_sparse_push_single_range 0.03s call tests/compute/test_specialization.py::test_v2v_pull[idtype1] 0.03s call tests/compute/test_transform.py::test_remove_edges[idtype0] 0.03s call tests/compute/test_specialization.py::test_pull_multi_fallback[idtype0] 0.03s call tests/compute/test_transform.py::test_remove_edges[idtype1] 0.03s call tests/compute/test_specialization.py::test_pull_multi_fallback[idtype1] 0.03s call tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype0] 0.03s call tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype0] 0.03s call tests/compute/test_heterograph.py::test_flatten[idtype0] 0.03s call tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype1] 0.03s call tests/compute/test_heterograph.py::test_format[idtype0] 0.03s call tests/compute/test_specialization.py::test_v2v_update_all[idtype0] 0.03s call tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler 0.03s call tests/compute/test_heterograph.py::test_format[idtype1] 0.03s call tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype1] 0.03s call tests/compute/test_serialize.py::test_load_old_files1 0.02s call tests/compute/test_heterograph.py::test_flatten[idtype1] 0.02s call tests/compute/test_sampler.py::test_setseed 0.02s call tests/compute/test_specialization.py::test_v2v_update_all[idtype1] 0.02s call tests/compute/test_heterograph.py::test_convert[idtype0] 0.02s call tests/compute/test_heterograph.py::test_convert[idtype1] 0.02s call tests/compute/test_transform.py::test_to_block[idtype0] 0.02s call tests/compute/test_subgraph.py::test_subgraph1[idtype0] 0.02s call tests/compute/test_subgraph.py::test_subgraph1[idtype1] 0.02s call tests/compute/test_transform.py::test_to_block[idtype1] 0.02s call tests/compute/test_sampler.py::test_layer_sampler 0.02s call tests/compute/test_transform.py::test_reorder_graph[idtype0] 0.02s call tests/compute/test_specialization.py::test_spmv_3d_feat[idtype1] 0.02s call 
tests/compute/test_specialization.py::test_spmv_3d_feat[idtype0] 0.02s call tests/compute/test_transform.py::test_add_edges[idtype0] 0.02s call tests/compute/test_transform.py::test_add_edges[idtype1] 0.02s call tests/compute/test_transform.py::test_reorder_graph[idtype1] 0.02s call tests/compute/test_heterograph.py::test_types_in_function[idtype0] 0.02s call tests/compute/test_heterograph.py::test_types_in_function[idtype1] 0.02s call tests/compute/test_sampling.py::test_pinsage_sampling[True] 0.02s call tests/compute/test_transform.py::test_module_sign[g0] 0.02s call tests/compute/test_transform.py::test_add_reverse_edges 0.02s call tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype0] 0.02s call tests/compute/test_heterograph.py::test_add_edges[idtype0] 0.02s call tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype0] 0.02s call tests/compute/test_heterograph.py::test_remove_nodes[idtype0] 0.02s call tests/compute/test_heterograph.py::test_float_cast 0.02s call tests/compute/test_heterograph.py::test_add_edges[idtype1] 0.01s call tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype1] 0.01s call tests/compute/test_heterograph.py::test_remove_nodes[idtype1] 0.01s call tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype0] 0.01s call tests/compute/test_heterograph.py::test_remove_edges[idtype0] 0.01s call tests/compute/test_heterograph.py::test_remove_edges[idtype1] 0.01s call tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype1] 0.01s call tests/compute/test_transform.py::test_add_selfloop[idtype0] 0.01s call tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype1] 0.01s call tests/compute/test_readout.py::test_topk[True-g0-idtype0] ========== 2334 passed, 124 skipped, 342 warnings in 77.18s (0:01:17) ========== ============================= test session starts ============================== platform linux -- Python 3.7.0, pytest-7.1.2, pluggy-1.0.0 -- /opt/conda/envs/pytorch-ci/bin/python3 cachedir: .pytest_cache rootdir: /root/jenkins/workspace/dgl_PR-4648 collecting ... 
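(Annotation: the deprecation warnings collected in the first session above each point at a concrete migration, and the dgl.contrib.sampling.NeighborSampler / NodeFlow warnings point to the new dgl.dataloading APIs sketched further below. The following is a minimal migration sketch, not code from this build; the toy graph `g` and its ('user', 'follows', 'user') relation are assumptions made for illustration.)

    import numpy as np
    import torch
    import dgl
    from packaging.version import Version   # replaces distutils' LooseVersion

    # DeprecationWarning at test_sparse.py:299/339: distutils Version classes.
    if torch.version.cuda is not None and Version(torch.version.cuda) < Version("11.0"):
        pass  # branch taken for pre-11.0 CUDA builds

    g = dgl.heterograph({('user', 'follows', 'user'): ([0, 1], [1, 2])})

    # DGLWarning: etype with 'str' format is deprecated; pass the canonical triple.
    eids = g.edges(etype=('user', 'follows', 'user'), form='eid')

    # DGLWarning: share_ndata/share_edata were renamed to copy_ndata/copy_edata.
    rg = dgl.reverse(g, copy_ndata=True, copy_edata=True)

    # UserWarning: copy a non-writeable NumPy array before wrapping it as a tensor.
    arr = np.arange(4)
    arr.setflags(write=False)
    t = torch.as_tensor(arr.copy())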
collected 3978 items tests/pytorch/test_dataloader.py::test_graph_dataloader PASSED [ 0%] tests/pytorch/test_dataloader.py::test_cluster_gcn[0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_cluster_gcn[4] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_shadow[0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_shadow[4] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[node-0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[node-4] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[edge-0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[edge-4] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[walk-0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_saint[walk-4] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-cpu-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-cpu-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-uva_cuda_indices-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-uva_cuda_indices-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-uva_cpu_indices-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-uva_cpu_indices-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-pure_gpu-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-pure_gpu-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-cpu-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-cpu-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-uva_cuda_indices-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-uva_cuda_indices-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-uva_cpu_indices-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-uva_cpu_indices-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-pure_gpu-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-pure_gpu-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-full-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-full-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor2-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor2-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-full-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-full-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-neighbor-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-neighbor-idtype1] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-neighbor2-idtype0] PASSED [ 0%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cuda_indices-neighbor2-idtype1] PASSED [ 0%] 
tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-full-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-full-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-neighbor-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-neighbor-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-neighbor2-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-uva_cpu_indices-neighbor2-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-full-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-full-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-neighbor-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-neighbor-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-neighbor2-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[False-pure_gpu-neighbor2-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-full-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-full-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor-idtype0] [ OK ] RowwiseTest.TestCOOPerEtypeSampling (40265 ms) [ RUN ] RowwiseTest.TestCOOPerEtypeSamplingUniform PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor2-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor2-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-full-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-full-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-neighbor-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-neighbor-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-neighbor2-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cuda_indices-neighbor2-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-full-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-full-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-neighbor-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-neighbor-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-neighbor2-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-uva_cpu_indices-neighbor2-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-full-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-full-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-neighbor-idtype0] PASSED [ 1%] 
tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-neighbor-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-neighbor2-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_node_dataloader[True-pure_gpu-neighbor2-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-full-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-full-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-neighbor-idtype0] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-neighbor-idtype1] PASSED [ 1%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-full-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-full-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler0-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler0-full-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler0-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler0-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler1-full-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler1-full-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler1-neighbor-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler1-neighbor-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler2-full-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler2-full-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler2-neighbor-idtype0] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-uva-neg_sampler2-neighbor-idtype1] SKIPPED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler0-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler0-full-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler0-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler0-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler1-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler1-full-idtype1] PASSED [ 2%] 
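(Annotation: the bracketed IDs in the dataloader tests above and below encode the sampling placement: cpu, uva_cuda_indices / uva_cpu_indices, or pure_gpu. A rough sketch of what the cpu and UVA variants mean in user code, with a made-up random graph and fanouts; this is not the test body itself.)

    import torch
    import dgl
    from dgl.dataloading import DataLoader, NeighborSampler

    g = dgl.rand_graph(1000, 5000)              # hypothetical input graph
    train_nids = torch.arange(g.num_nodes())
    sampler = NeighborSampler([5, 5])           # two-hop neighbor sampling

    # "cpu": graph, indices, and sampling all stay on the host.
    cpu_loader = DataLoader(g, train_nids, sampler, batch_size=64, shuffle=True)

    # "uva_*": the graph stays in (pinned) host memory and is sampled from the
    # GPU; "pure_gpu" would instead move the whole graph with g.to('cuda').
    if torch.cuda.is_available():
        uva_loader = DataLoader(g, train_nids.to('cuda'), sampler,
                                device='cuda', use_uva=True, batch_size=64)

    for input_nodes, output_nodes, blocks in cpu_loader:
        break  # each minibatch: (input nodes, seed nodes, MFGs)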
tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler1-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler1-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler2-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler2-full-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler2-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[False-pure_gpu-neg_sampler2-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-full-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-full-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-full-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-neighbor-idtype0] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-neighbor-idtype1] PASSED [ 2%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-full-idtype0] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-full-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-neighbor-idtype0] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-neighbor-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-full-idtype0] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-full-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-neighbor-idtype0] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-neighbor-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler1-full-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler1-full-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler1-neighbor-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler1-neighbor-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler2-full-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler2-full-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler2-neighbor-idtype0] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler2-neighbor-idtype1] SKIPPED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler0-full-idtype0] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler0-full-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler0-neighbor-idtype0] PASSED [ 
3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler0-neighbor-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-full-idtype0] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-full-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-neighbor-idtype0] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-neighbor-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler2-full-idtype0] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler2-full-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler2-neighbor-idtype0] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler2-neighbor-idtype1] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-None-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-None-True] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-self-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-self-True] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-reverse_id-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-reverse_id-True] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-reverse_types-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-reverse_types-True] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-None-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-None-True] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-self-False] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-self-True] PASSED [ 3%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-reverse_id-False] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-reverse_id-True] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-reverse_types-False] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-reverse_types-True] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-None-False] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-None-True] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-self-False] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-self-True] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-reverse_id-False] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-reverse_id-True] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-reverse_types-False] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler0-reverse_types-True] PASSED [ 4%] 
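(Annotation: the test_edge_dataloader_excludes IDs above enumerate the exclude modes of edge prediction sampling: None, self, reverse_id, and reverse_types. A sketch under stated assumptions: the toy graph stores each edge followed by its reverse, which is the layout exclude='reverse_id' expects via reverse_eids.)

    import torch
    import dgl
    from dgl.dataloading import (DataLoader, NeighborSampler,
                                 as_edge_prediction_sampler, negative_sampler)

    src, dst = torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])
    g = dgl.graph((torch.cat([src, dst]), torch.cat([dst, src])))
    E = src.shape[0]
    # Edge i and edge i + E are mutual reverses in this layout.
    reverse_eids = torch.cat([torch.arange(E, 2 * E), torch.arange(0, E)])

    sampler = as_edge_prediction_sampler(
        NeighborSampler([5]),
        exclude='reverse_id',                   # or None, 'self', 'reverse_types'
        reverse_eids=reverse_eids,
        negative_sampler=negative_sampler.Uniform(3))
    loader = DataLoader(g, torch.arange(g.num_edges()), sampler, batch_size=2)
    for input_nodes, pos_graph, neg_graph, blocks in loader:
        break  # positive and negative edge subgraphs plus the MFGs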
tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-None-False] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-None-True] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-self-False] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-self-True] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-reverse_id-False] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-reverse_id-True] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-reverse_types-False] PASSED [ 4%] tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-reverse_types-True] PASSED [ 4%] tests/pytorch/test_dist_optim.py::test_sparse_opt PASSED [ 5%] tests/compute/test_data.py::test_tudataset_regression PASSED [ 5%] tests/compute/test_data.py::test_tudataset_regression [ OK ] RowwiseTest.TestCOOPerEtypeSamplingUniform (30441 ms) [ RUN ] RowwiseTest.TestCSRTopk [ OK ] RowwiseTest.TestCSRTopk (381 ms) [ RUN ] RowwiseTest.TestCOOTopk [ OK ] RowwiseTest.TestCOOTopk (1037 ms) [ RUN ] RowwiseTest.TestCSRSamplingBiased [ OK ] RowwiseTest.TestCSRSamplingBiased (3684 ms) [----------] 11 tests from RowwiseTest (188390 ms total) [----------] 3 tests from SampleUtilsTest [ RUN ] SampleUtilsTest.TestWithReplacement PASSED [ 5%] tests/compute/test_data.py::test_data_hash PASSED [ 5%] tests/compute/test_data.py::test_citation_graph [ OK ] SampleUtilsTest.TestWithReplacement (2537 ms) [ RUN ] SampleUtilsTest.TestWithoutReplacementOrder [ OK ] SampleUtilsTest.TestWithoutReplacementOrder (0 ms) [ RUN ] SampleUtilsTest.TestWithoutReplacementUnique PASSED [ 5%] tests/compute/test_data.py::test_gnn_benchmark PASSED [ 5%] tests/compute/test_data.py::test_data_hash PASSED [ 5%] tests/compute/test_data.py::test_citation_graph [ OK ] SampleUtilsTest.TestWithoutReplacementUnique (3933 ms) [----------] 3 tests from SampleUtilsTest (6470 ms total) [----------] 3 tests from RandomTest [ RUN ] RandomTest.TestChoice [ OK ] RandomTest.TestChoice (1 ms) [ RUN ] RandomTest.TestUniformChoice [ OK ] RandomTest.TestUniformChoice (0 ms) [ RUN ] RandomTest.TestBiasedChoice [ OK ] RandomTest.TestBiasedChoice (11 ms) [----------] 3 tests from RandomTest (12 ms total) [----------] 4 tests from Serialize [ RUN ] Serialize.UnitGraph_COO [ OK ] Serialize.UnitGraph_COO (59 ms) [ RUN ] Serialize.UnitGraph_CSR [ OK ] Serialize.UnitGraph_CSR (49 ms) [ RUN ] Serialize.ImmutableGraph [ OK ] Serialize.ImmutableGraph (42 ms) [ RUN ] Serialize.HeteroGraph [ OK ] Serialize.HeteroGraph (100 ms) [----------] 4 tests from Serialize (251 ms total) [----------] 3 tests from SmartPtrTest/0, where TypeParam = std::shared_ptr [ RUN ] SmartPtrTest/0.Obj_Test [ OK ] SmartPtrTest/0.Obj_Test (0 ms) [ RUN ] SmartPtrTest/0.Vector_Test1 [ OK ] SmartPtrTest/0.Vector_Test1 (0 ms) [ RUN ] SmartPtrTest/0.Vector_Test2 [ OK ] SmartPtrTest/0.Vector_Test2 (0 ms) [----------] 3 tests from SmartPtrTest/0 (0 ms total) [----------] 3 tests from SmartPtrTest/1, where TypeParam = std::unique_ptr > [ RUN ] SmartPtrTest/1.Obj_Test [ OK ] SmartPtrTest/1.Obj_Test (0 ms) [ RUN ] SmartPtrTest/1.Vector_Test1 [ OK ] SmartPtrTest/1.Vector_Test1 (0 ms) [ RUN ] SmartPtrTest/1.Vector_Test2 [ OK ] SmartPtrTest/1.Vector_Test2 (0 ms) [----------] 3 tests from SmartPtrTest/1 (0 ms total) [----------] 19 tests from SpmatTest [ RUN ] SpmatTest.COOToCSR [ 
OK ] SpmatTest.COOToCSR (383 ms) [ RUN ] SpmatTest.TestCOOHasDuplicate [ OK ] SpmatTest.TestCOOHasDuplicate (0 ms) [ RUN ] SpmatTest.COOSort [ OK ] SpmatTest.COOSort (0 ms) [ RUN ] SpmatTest.TestCOOReorder [ OK ] SpmatTest.TestCOOReorder (6 ms) [ RUN ] SpmatTest.COOGetData [ OK ] SpmatTest.COOGetData (55 ms) [ RUN ] SpmatTest.COOGetDataAndIndices [ OK ] SpmatTest.COOGetDataAndIndices (0 ms) [ RUN ] SpmatTest.TestCSRIsNonZero [ OK ] SpmatTest.TestCSRIsNonZero (12 ms) [ RUN ] SpmatTest.TestCSRGetRowNNZ [ OK ] SpmatTest.TestCSRGetRowNNZ (0 ms) [ RUN ] SpmatTest.TestCSRGetRowColumnIndices [ OK ] SpmatTest.TestCSRGetRowColumnIndices (0 ms) [ RUN ] SpmatTest.TestCSRGetRowData [ OK ] SpmatTest.TestCSRGetRowData (0 ms) [ RUN ] SpmatTest.CSRGetData [ OK ] SpmatTest.CSRGetData (0 ms) [ RUN ] SpmatTest.CSRGetDataAndIndices [ OK ] SpmatTest.CSRGetDataAndIndices (0 ms) [ RUN ] SpmatTest.CSRTranspose [ OK ] SpmatTest.CSRTranspose (0 ms) [ RUN ] SpmatTest.CSRToCOO [ OK ] SpmatTest.CSRToCOO (0 ms) [ RUN ] SpmatTest.TestCSRSliceRows [ OK ] SpmatTest.TestCSRSliceRows (601 ms) [ RUN ] SpmatTest.CSRSliceMatrix [ OK ] SpmatTest.CSRSliceMatrix (2 ms) [ RUN ] SpmatTest.CSRHasDuplicate [ OK ] SpmatTest.CSRHasDuplicate (0 ms) [ RUN ] SpmatTest.CSRSort [ OK ] SpmatTest.CSRSort (0 ms) [ RUN ] SpmatTest.TestCSRReorder [ OK ] SpmatTest.TestCSRReorder (0 ms) [----------] 19 tests from SpmatTest (1059 ms total) [----------] 6 tests from SpmmTest [ RUN ] SpmmTest.TestSpmmCopyLhs [ OK ] SpmmTest.TestSpmmCopyLhs (0 ms) [ RUN ] SpmmTest.TestSpmmCopyRhs [ OK ] SpmmTest.TestSpmmCopyRhs (1 ms) [ RUN ] SpmmTest.TestSpmmAdd [ OK ] SpmmTest.TestSpmmAdd (0 ms) [ RUN ] SpmmTest.TestSpmmSub [ OK ] SpmmTest.TestSpmmSub (0 ms) [ RUN ] SpmmTest.TestSpmmMul [ OK ] SpmmTest.TestSpmmMul (1 ms) [ RUN ] SpmmTest.TestSpmmDiv [ OK ] SpmmTest.TestSpmmDiv (0 ms) [----------] 6 tests from SpmmTest (2 ms total) [----------] 7 tests from UniGraphTest [ RUN ] UniGraphTest.TestUnitGraph_CopyTo [ OK ] UniGraphTest.TestUnitGraph_CopyTo (0 ms) [ RUN ] UniGraphTest.TestUnitGraph_InOutDegrees [ OK ] UniGraphTest.TestUnitGraph_InOutDegrees (52 ms) [ RUN ] UniGraphTest.TestUnitGraph_Create [ OK ] UniGraphTest.TestUnitGraph_Create (0 ms) [ RUN ] UniGraphTest.TestUnitGraph_GetInCSR [ OK ] UniGraphTest.TestUnitGraph_GetInCSR (190 ms) [ RUN ] UniGraphTest.TestUnitGraph_GetOutCSR [ OK ] UniGraphTest.TestUnitGraph_GetOutCSR (97 ms) [ RUN ] UniGraphTest.TestUnitGraph_GetCOO [ OK ] UniGraphTest.TestUnitGraph_GetCOO (0 ms) [ RUN ] UniGraphTest.TestUnitGraph_Reserve [ OK ] UniGraphTest.TestUnitGraph_Reserve (122 ms) [----------] 7 tests from UniGraphTest (461 ms total) [----------] 4 tests from ZeroCopySerialize [ RUN ] ZeroCopySerialize.NDArray [ OK ] ZeroCopySerialize.NDArray (0 ms) [ RUN ] ZeroCopySerialize.ZeroShapeNDArray [ OK ] ZeroCopySerialize.ZeroShapeNDArray (0 ms) [ RUN ] ZeroCopySerialize.SharedMem [ OK ] ZeroCopySerialize.SharedMem (0 ms) [ RUN ] ZeroCopySerialize.HeteroGraph [ OK ] ZeroCopySerialize.HeteroGraph (91 ms) [----------] 4 tests from ZeroCopySerialize (91 ms total) [----------] Global test environment tear-down [==========] 101 tests from 23 test suites ran. (205767 ms total) [ PASSED ] 101 tests. ~/jenkins/workspace/dgl_PR-4648 Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration... 
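(Annotation: the C++ SpmatTest/SpmmTest suites that just finished exercise DGL's internal COO/CSR conversions and sparse-dense products. The SciPy analogue below is illustrative only; it is not the C++ API under test.)

    import numpy as np
    from scipy.sparse import coo_matrix

    rows, cols = np.array([0, 1, 2]), np.array([1, 2, 0])
    A = coo_matrix((np.ones(3), (rows, cols)), shape=(3, 3))
    A_csr = A.tocsr()              # cf. SpmatTest.COOToCSR
    A_coo = A_csr.tocoo()          # cf. SpmatTest.CSRToCOO
    A_t = A_csr.T.tocsr()          # cf. SpmatTest.CSRTranspose
    y = A_csr @ np.ones((3, 2))    # cf. SpmmTest: SpMM against dense features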
[WS-CLEANUP] done [Pipeline] } $ docker stop --time=1 280b39495ad3fad21dbb9762a178907987f011c3e8f474eb1caee524b0ff882c PASSED [ 5%] tests/compute/test_data.py::test_gnn_benchmark PASSED [ 5%] tests/compute/test_data.py::test_reddit $ docker rm -f 280b39495ad3fad21dbb9762a178907987f011c3e8f474eb1caee524b0ff882c [Pipeline] // withDockerContainer [Pipeline] } [Pipeline] // withEnv [Pipeline] } Running on dgl-manual-large-cpu in /root/jenkins/workspace/dgl_PR-4648 PASSED [ 4%] tests/pytorch/test_geometry.py::test_fps PASSED [ 4%] tests/pytorch/test_geometry.py::test_fps_start_idx PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[False-euclidean-bruteforce-blas] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[False-euclidean-bruteforce] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[False-euclidean-kd-tree] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[False-cosine-bruteforce-blas] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[False-cosine-bruteforce] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[False-cosine-kd-tree] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[True-euclidean-bruteforce-blas] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[True-euclidean-bruteforce] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[True-euclidean-kd-tree] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[True-cosine-bruteforce-blas] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[True-cosine-bruteforce] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cpu[True-cosine-kd-tree] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cuda[False-euclidean-bruteforce-blas] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cuda[False-euclidean-bruteforce] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cuda[False-euclidean-bruteforce-sharemem] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cuda[False-cosine-bruteforce-blas] PASSED [ 4%] tests/pytorch/test_geometry.py::test_knn_cuda[False-cosine-bruteforce] PASSED [ 5%] tests/pytorch/test_geometry.py::test_knn_cuda[False-cosine-bruteforce-sharemem] PASSED [ 5%] tests/pytorch/test_geometry.py::test_knn_cuda[True-euclidean-bruteforce-blas] PASSED [ 5%] tests/pytorch/test_geometry.py::test_knn_cuda[True-euclidean-bruteforce] PASSED [ 5%] tests/pytorch/test_geometry.py::test_knn_cuda[True-euclidean-bruteforce-sharemem] PASSED [ 5%] tests/pytorch/test_geometry.py::test_knn_cuda[True-cosine-bruteforce-blas] PASSED [ 5%] tests/pytorch/test_geometry.py::test_knn_cuda[True-cosine-bruteforce] PASSED [ 5%] tests/pytorch/test_geometry.py::test_knn_cuda[True-cosine-bruteforce-sharemem] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g0-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g0-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g1-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g1-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g2-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g2-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g3-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g3-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g4-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g4-idtype1] 
PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g5-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g5-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g6-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-True-g6-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g0-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g0-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g1-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g1-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g2-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g2-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g3-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g3-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g4-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g4-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g5-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g5-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g6-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[True-False-g6-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g0-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g0-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g1-idtype0] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g1-idtype1] PASSED [ 5%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g2-idtype0] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g2-idtype1] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g3-idtype0] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g3-idtype1] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g4-idtype0] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g4-idtype1] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g5-idtype0] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g5-idtype1] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g6-idtype0] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-True-g6-idtype1] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g0-idtype0] [Pipeline] // node [Pipeline] { [Pipeline] } [Pipeline] // stage [Pipeline] } [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g0-idtype1] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g1-idtype0] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g1-idtype1] PASSED [ 6%] 
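(Annotation: the test_knn_cpu/test_knn_cuda cases above sweep the distance metrics (euclidean, cosine) and search backends (bruteforce-blas, bruteforce, bruteforce-sharemem, kd-tree) of dgl.knn_graph. A minimal sketch with made-up point data:)

    import torch
    import dgl

    x = torch.randn(32, 3)   # 32 hypothetical points in 3-D
    # The backend only changes how neighbors are searched, not the result graph.
    g1 = dgl.knn_graph(x, 4, algorithm='bruteforce-blas', dist='euclidean')
    g2 = dgl.knn_graph(x, 4, algorithm='kd-tree', dist='cosine')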
tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g2-idtype0] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g2-idtype1] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g3-idtype0] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g3-idtype1] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g4-idtype0] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g4-idtype1] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g5-idtype0] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g5-idtype1] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g6-idtype0] PASSED [ 6%] tests/pytorch/test_geometry.py::test_edge_coarsening[False-False-g6-idtype1] PASSED [ 6%] tests/pytorch/test_ipc.py::test_torch_ipc Cloning repository https://github.com/dmlc/dgl.git > git init /root/jenkins/workspace/dgl_PR-4648 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv0[1] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv0[2] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g0-idtype0] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g0-idtype1] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g1-idtype0] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g1-idtype1] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g2-idtype0] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g2-idtype1] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g3-idtype0] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g3-idtype1] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g4-idtype0] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g4-idtype1] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g5-idtype0] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g5-idtype1] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g6-idtype0] PASSED [ 6%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g6-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g7-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g7-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g0-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g0-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g1-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g1-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g2-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g2-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g3-idtype0] PASSED [ 7%] 
tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g3-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g4-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g4-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g5-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g5-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g6-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g6-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g7-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-both-g7-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g0-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g0-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g1-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g1-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g2-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g2-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g3-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g3-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g4-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g4-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g5-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g5-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g6-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g6-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g7-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-right-g7-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g0-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g0-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g1-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g1-idtype1] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g2-idtype0] PASSED [ 7%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g2-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g3-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g3-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g4-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g4-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g5-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g5-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g6-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g6-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g7-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-True-left-g7-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g0-idtype0] PASSED [ 8%] 
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g0-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g1-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g1-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g2-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g2-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g3-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g3-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g4-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g4-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g5-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g5-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g6-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g6-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g7-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-none-g7-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g0-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g0-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g1-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g1-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g2-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g2-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g3-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g3-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g4-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g4-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g5-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g5-idtype1] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g6-idtype0] PASSED [ 8%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g6-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g7-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-both-g7-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g0-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g0-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g1-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g1-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g2-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g2-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g3-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g3-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g4-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g4-idtype1] PASSED [ 9%] 
tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g5-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g5-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g6-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g6-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g7-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-right-g7-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g0-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g0-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g1-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g1-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g2-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g2-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g3-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g3-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g4-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g4-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g5-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g5-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g6-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g6-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g7-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-True-False-left-g7-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g0-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g0-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g1-idtype0] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g1-idtype1] PASSED [ 9%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g2-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g2-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g3-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g3-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g4-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g4-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g5-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g5-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g6-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g6-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g7-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-none-g7-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g0-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g0-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g1-idtype0] PASSED [ 10%] 
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g1-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g2-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g2-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g3-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g3-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g4-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g4-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g5-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g5-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g6-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g6-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g7-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-both-g7-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g0-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g0-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g1-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g1-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g2-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g2-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g3-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g3-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g4-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g4-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g5-idtype0] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g5-idtype1] PASSED [ 10%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g6-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g6-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g7-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-right-g7-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g0-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g0-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g1-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g1-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g2-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g2-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g3-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g3-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g4-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g4-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g5-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g5-idtype1] PASSED [ 11%] 
tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g6-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g6-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g7-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-True-left-g7-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g0-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g0-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g1-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g1-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g2-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g2-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g3-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g3-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g4-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g4-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g5-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g5-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g6-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g6-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g7-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-none-g7-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g0-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g0-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g1-idtype0] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g1-idtype1] PASSED [ 11%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g2-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g2-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g3-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g3-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g4-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g4-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g5-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g5-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g6-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g6-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g7-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-both-g7-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g0-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g0-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g1-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g1-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g2-idtype0] 
PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g2-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g3-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g3-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g4-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g4-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g5-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g5-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g6-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g6-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g7-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-right-g7-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g0-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g0-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g1-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g1-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g2-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g2-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g3-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g3-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g4-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g4-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g5-idtype0] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g5-idtype1] PASSED [ 12%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g6-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g6-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g7-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[1-False-False-left-g7-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g0-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g0-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g1-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g1-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g2-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g2-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g3-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g3-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g4-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g4-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g5-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g5-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g6-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g6-idtype1] 
PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g7-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-none-g7-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g0-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g0-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g1-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g1-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g2-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g2-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g3-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g3-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g4-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g4-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g5-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g5-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g6-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g6-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g7-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-both-g7-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g0-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g0-idtype1] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g1-idtype0] PASSED [ 13%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g1-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g2-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g2-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g3-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g3-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g4-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g4-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g5-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g5-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g6-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g6-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g7-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-right-g7-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g0-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g0-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g1-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g1-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g2-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g2-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g3-idtype0] PASSED [ 14%] 
tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g3-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g4-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g4-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g5-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g5-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g6-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g6-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g7-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-True-left-g7-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g0-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g0-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g1-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g1-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g2-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g2-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g3-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g3-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g4-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g4-idtype1] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g5-idtype0] PASSED [ 14%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g5-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g6-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g6-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g7-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-none-g7-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g0-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g0-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g1-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g1-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g2-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g2-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g3-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g3-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g4-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g4-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g5-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g5-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g6-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g6-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g7-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-both-g7-idtype1] PASSED [ 15%] 
tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g0-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g0-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g1-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g1-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g2-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g2-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g3-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g3-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g4-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g4-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g5-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g5-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g6-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g6-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g7-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-right-g7-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g0-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g0-idtype1] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g1-idtype0] PASSED [ 15%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g1-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g2-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g2-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g3-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g3-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g4-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g4-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g5-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g5-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g6-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g6-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g7-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-True-False-left-g7-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g0-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g0-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g1-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g1-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g2-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g2-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g3-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g3-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g4-idtype0] PASSED [ 16%] 
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g4-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g5-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g5-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g6-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g6-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g7-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-none-g7-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g0-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g0-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g1-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g1-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g2-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g2-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g3-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g3-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g4-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g4-idtype1] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g5-idtype0] PASSED [ 16%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g5-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g6-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g6-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g7-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-both-g7-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g0-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g0-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g1-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g1-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g2-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g2-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g3-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g3-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g4-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g4-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g5-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g5-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g6-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g6-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g7-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-right-g7-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g0-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g0-idtype1] PASSED [ 17%] 
tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g1-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g1-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g2-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g2-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g3-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g3-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g4-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g4-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g5-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g5-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g6-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g6-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g7-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-True-left-g7-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g0-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g0-idtype1] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g1-idtype0] PASSED [ 17%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g1-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g2-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g2-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g3-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g3-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g4-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g4-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g5-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g5-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g6-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g6-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g7-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-none-g7-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g0-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g0-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g1-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g1-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g2-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g2-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g3-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g3-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g4-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g4-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g5-idtype0] PASSED [ 18%] 
tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g5-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g6-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g6-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g7-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-both-g7-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g0-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g0-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g1-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g1-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g2-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g2-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g3-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g3-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g4-idtype0] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g4-idtype1] PASSED [ 18%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g5-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g5-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g6-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g6-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g7-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-right-g7-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g0-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g0-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g1-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g1-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g2-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g2-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g3-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g3-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g4-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g4-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g5-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g5-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g6-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g6-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g7-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv[2-False-False-left-g7-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-none-g0-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-none-g0-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-both-g0-idtype0] PASSED [ 19%] 
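For reference, the test_graph_conv blocks above sweep dgl.nn.GraphConv over its four norm modes (none, both, right, left), the weight/bias constructor flags, eight graph fixtures (g0-g7), and two index dtypes (idtype0/idtype1, presumably int32/int64). A minimal sketch of the module under test; the toy graph and feature sizes are illustrative assumptions, not taken from the suite:

    import torch
    import dgl
    from dgl.nn import GraphConv

    # Toy graph standing in for the suite's g0-g7 fixtures (assumption).
    # Self-loops avoid the zero-in-degree error GraphConv raises by default.
    g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 3]), idtype=torch.int64))
    feat = torch.randn(g.num_nodes(), 5)
    for norm in ("none", "both", "right", "left"):  # the norm modes in the test ids
        conv = GraphConv(5, 2, norm=norm, weight=True, bias=True)
        out = conv(g, feat)  # shape: (num_nodes, 2)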
tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-both-g0-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-right-g0-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-True-right-g0-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-none-g0-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-none-g0-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-both-g0-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-both-g0-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-right-g0-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-True-False-right-g0-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-none-g0-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-none-g0-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-both-g0-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-both-g0-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-right-g0-idtype0] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-True-right-g0-idtype1] PASSED [ 19%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-none-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-none-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-both-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-both-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-right-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[1-False-False-right-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-none-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-none-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-both-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-both-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-right-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-True-right-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-none-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-none-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-both-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-both-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-right-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-True-False-right-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-none-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-none-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-both-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-both-g0-idtype1] PASSED [ 20%] 
tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-right-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-True-right-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-none-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-none-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-both-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-both-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-right-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight[2-False-False-right-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-none-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-none-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-both-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-both-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-right-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-True-right-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-none-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-none-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-both-g0-idtype0] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-both-g0-idtype1] PASSED [ 20%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-right-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-True-False-right-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-none-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-none-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-both-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-both-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-right-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-True-right-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-none-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-none-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-both-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-both-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-right-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[1-False-False-right-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-none-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-none-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-both-g0-idtype0] PASSED [ 21%] 
tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-both-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-right-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-True-right-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-none-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-none-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-both-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-both-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-right-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-True-False-right-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-none-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-none-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-both-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-both-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-right-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-True-right-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-none-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-none-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-both-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-both-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-right-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_e_weight_norm[2-False-False-right-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g0-idtype0] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g0-idtype1] PASSED [ 21%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g1-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-none-g1-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g0-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g0-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g1-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-both-g1-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g0-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g0-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g1-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-True-right-g1-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g0-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g0-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g1-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-none-g1-idtype1] PASSED [ 22%] 
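The test_graph_conv_e_weight and test_graph_conv_e_weight_norm blocks above exercise the same module with per-edge scalar weights: GraphConv's forward accepts an optional edge_weight argument, and dgl.nn.EdgeWeightNorm pre-normalizes raw weights by node degree. A sketch under the same toy-graph assumption; whether the suite pairs the two exactly this way is an assumption:

    import torch
    import dgl
    from dgl.nn import GraphConv, EdgeWeightNorm

    g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 3])))
    feat = torch.randn(g.num_nodes(), 5)
    ew = torch.rand(g.num_edges()) + 0.1  # one positive scalar weight per edge (toy values)
    conv = GraphConv(5, 2, norm="none")   # normalization is delegated to EdgeWeightNorm below
    out = conv(g, feat, edge_weight=ew)               # raw weighted message passing
    norm_ew = EdgeWeightNorm(norm="both")(g, ew)      # symmetric degree normalization
    out2 = conv(g, feat, edge_weight=norm_ew)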
tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g0-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g0-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g1-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-both-g1-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g0-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g0-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g1-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-True-False-right-g1-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g0-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g0-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g1-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-none-g1-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g0-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g0-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g1-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-both-g1-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g0-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g0-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g1-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-True-right-g1-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g0-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g0-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g1-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-none-g1-idtype1] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g0-idtype0] PASSED [ 22%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g0-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g1-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-both-g1-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g0-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g0-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g1-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[1-False-False-right-g1-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g0-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g0-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g1-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-none-g1-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g0-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g0-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g1-idtype0] PASSED [ 23%] 
tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-both-g1-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g0-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g0-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g1-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-True-right-g1-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g0-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g0-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g1-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-none-g1-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g0-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g0-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g1-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-both-g1-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g0-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g0-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g1-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-True-False-right-g1-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g0-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g0-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g1-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-none-g1-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g0-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g0-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g1-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-both-g1-idtype1] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g0-idtype0] PASSED [ 23%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g0-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g1-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-True-right-g1-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g0-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g0-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g1-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-none-g1-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g0-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g0-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g1-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-both-g1-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g0-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g0-idtype1] PASSED [ 24%] 
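The test_graph_conv_bi cases above run GraphConv on unidirectional bipartite graphs, where the feature argument becomes a (source, destination) pair of tensors and the output is produced for the destination nodes. A sketch; the node types, counts, and feature sizes are made up:

    import torch
    import dgl
    from dgl.nn import GraphConv

    # One-directional bipartite graph: 4 src nodes -> 3 dst nodes (hypothetical types).
    g = dgl.heterograph({("user", "clicks", "item"): ([0, 1, 2, 3], [0, 1, 2, 0])})
    conv = GraphConv(5, 2, norm="right")
    out = conv(g, (torch.randn(4, 5), torch.randn(3, 5)))  # dst-node output: (3, 2)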
tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g1-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_graph_conv_bi[2-False-False-right-g1-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_tagconv[1] PASSED [ 24%] tests/pytorch/test_nn.py::test_tagconv[2] PASSED [ 24%] tests/pytorch/test_nn.py::test_set2set PASSED [ 24%] tests/pytorch/test_nn.py::test_glob_att_pool PASSED [ 24%] tests/pytorch/test_nn.py::test_simple_pool PASSED [ 24%] tests/pytorch/test_nn.py::test_set_trans PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn[1-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn[1-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn[8-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn[8-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn[32-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn[32-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn_default_nbasis[1-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn_default_nbasis[1-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn_default_nbasis[10-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn_default_nbasis[10-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn_default_nbasis[40-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_rgcn_default_nbasis[40-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g0-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g0-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g1-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g1-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g2-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g2-idtype1] PASSED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g3-idtype0] PASSED [ 24%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g3-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g4-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g4-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g5-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g5-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g6-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-1-g6-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g0-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g0-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g1-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g1-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g2-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g2-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g3-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g3-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g4-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g4-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g5-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g5-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g6-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[1-5-g6-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g0-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g0-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g1-idtype0] PASSED [ 25%] 
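Earlier in this block, test_tagconv, test_set2set, the pooling tests, and test_rgcn cover dgl.nn.TAGConv, Set2Set, the global pooling layers, and RelGraphConv. The test_gat_conv sweeps that follow (and test_gatv2_conv below) exercise multi-head graph attention, whose output carries a per-head axis; the leading numbers in the bracketed ids presumably sweep output size and head count. A sketch with illustrative sizes; GATv2Conv takes the same call shape:

    import torch
    import dgl
    from dgl.nn import GATConv

    g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
    conv = GATConv(in_feats=5, out_feats=2, num_heads=4)
    out = conv(g, torch.randn(g.num_nodes(), 5))  # shape: (num_nodes, 4, 2), one slice per head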
tests/pytorch/test_nn.py::test_gat_conv[4-1-g1-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g2-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g2-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g3-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g3-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g4-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g4-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g5-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g5-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g6-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-1-g6-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g0-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g0-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g1-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g1-idtype1] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g2-idtype0] PASSED [ 25%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g2-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g3-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g3-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g4-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g4-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g5-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g5-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g6-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv[4-5-g6-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g0-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g0-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g1-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-1-g1-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g0-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g0-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g1-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[1-2-g1-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g0-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g0-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g1-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-1-g1-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g0-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g0-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g1-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gat_conv_bi[4-2-g1-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g0-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g0-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g1-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g1-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g2-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g2-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g3-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g3-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g4-idtype0] 
PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g4-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g5-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g5-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g6-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-1-g6-idtype1] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g0-idtype0] PASSED [ 26%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g0-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g1-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g1-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g2-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g2-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g3-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g3-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g4-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g4-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g5-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g5-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g6-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[1-5-g6-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g0-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g0-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g1-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g1-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g2-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g2-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g3-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g3-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g4-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g4-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g5-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g5-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g6-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-1-g6-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g0-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g0-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g1-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g1-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g2-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g2-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g3-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g3-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g4-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g4-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g5-idtype0] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g5-idtype1] PASSED [ 27%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g6-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv[4-5-g6-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g0-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g0-idtype1] PASSED [ 28%] 
tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g1-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-1-g1-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g0-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g0-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g1-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[1-2-g1-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g0-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g0-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g1-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-1-g1-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g0-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g0-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g1-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_gatv2_conv_bi[4-2-g1-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g0-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g0-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g1-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g1-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g2-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g2-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g3-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g3-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g4-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g4-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g5-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-1-g5-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g0-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g0-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g1-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g1-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g2-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g2-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g3-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g3-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g4-idtype0] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g4-idtype1] PASSED [ 28%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g5-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-1-5-g5-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g0-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g0-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g1-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g1-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g2-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g2-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g3-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g3-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g4-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g4-idtype1] PASSED [ 29%] 
tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g5-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-1-g5-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g0-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g0-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g1-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g1-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g2-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g2-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g3-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g3-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g4-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g4-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g5-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[1-5-5-g5-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g0-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g0-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g1-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g1-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g2-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g2-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g3-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g3-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g4-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g4-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g5-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-1-g5-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g0-idtype0] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g0-idtype1] PASSED [ 29%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g1-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g1-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g2-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g2-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g3-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g3-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g4-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g4-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g5-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-1-5-g5-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g0-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g0-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g1-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g1-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g2-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g2-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g3-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g3-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g4-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g4-idtype1] PASSED [ 30%] 
tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g5-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-1-g5-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g0-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g0-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g1-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g1-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g2-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g2-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g3-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g3-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g4-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g4-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g5-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv[4-5-5-g5-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g0-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g0-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g1-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-1-g1-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g0-idtype0] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g0-idtype1] PASSED [ 30%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g1-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-1-5-g1-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g0-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g0-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g1-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-1-g1-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g0-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g0-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g1-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[1-5-5-g1-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g0-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g0-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g1-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-1-g1-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g0-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g0-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g1-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-1-5-g1-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g0-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g0-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g1-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-1-g1-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g0-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g0-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g1-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_egat_conv_bi[4-5-5-g1-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g0-idtype0] PASSED [ 31%] 
tests/pytorch/test_nn.py::test_sage_conv[mean-g0-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g1-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g1-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g2-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g2-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g3-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g3-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g4-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g4-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g5-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g5-idtype1] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g6-idtype0] PASSED [ 31%] tests/pytorch/test_nn.py::test_sage_conv[mean-g6-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[mean-g7-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[mean-g7-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g0-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g0-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g1-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g1-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g2-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g2-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g3-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g3-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g4-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g4-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g5-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g5-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g6-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g6-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g7-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[pool-g7-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g0-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g0-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g1-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g1-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g2-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g2-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g3-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g3-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g4-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g4-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g5-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g5-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g6-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g6-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g7-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[gcn-g7-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g0-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g0-idtype1] PASSED [ 32%] 
tests/pytorch/test_nn.py::test_sage_conv[lstm-g1-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g1-idtype1] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g2-idtype0] PASSED [ 32%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g2-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g3-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g3-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g4-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g4-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g5-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g5-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g6-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g6-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g7-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv[lstm-g7-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g0-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g0-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g1-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g1-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g2-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-mean-g2-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g0-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g0-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g1-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g1-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g2-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-pool-g2-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g0-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g0-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g1-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g1-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g2-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-gcn-g2-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g0-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g0-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g1-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g1-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g2-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[1-lstm-g2-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g0-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g0-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g1-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g1-idtype1] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g2-idtype0] PASSED [ 33%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-mean-g2-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g0-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g0-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g1-idtype0] PASSED [ 34%] 
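The test_sage_conv[...] entries here sweep dgl.nn.SAGEConv over its four aggregator types (mean, pool, gcn, lstm), a set of graph fixtures, and both index dtypes. A minimal sketch of the module being exercised, with a toy graph and illustrative feature sizes (not the suite's fixtures):

import torch
import dgl
from dgl.nn import SAGEConv

# Toy 4-node cycle; every node has in-degree 1.
g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
feat = torch.randn(g.num_nodes(), 10)

for aggregator in ('mean', 'pool', 'gcn', 'lstm'):
    conv = SAGEConv(in_feats=10, out_feats=5, aggregator_type=aggregator)
    out = conv(g, feat)  # shape: (num_nodes, 5) for every aggregator
    assert out.shape == (g.num_nodes(), 5)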
tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g1-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g2-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-pool-g2-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g0-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g0-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g1-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g1-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g2-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-gcn-g2-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g0-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g0-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g1-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g1-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g2-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv_bi[2-lstm-g2-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv2[1-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv2[1-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv2[2-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sage_conv2[2-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g0-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g0-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g1-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g1-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g2-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g2-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g3-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g3-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g4-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g4-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g5-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[1-g5-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g0-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g0-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g1-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g1-idtype1] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g2-idtype0] PASSED [ 34%] tests/pytorch/test_nn.py::test_sgc_conv[2-g2-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g3-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g3-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g4-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g4-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g5-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_sgc_conv[2-g5-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g0-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g0-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g1-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g1-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g2-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g2-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g3-idtype0] PASSED [ 35%] 
tests/pytorch/test_nn.py::test_appnp_conv[g3-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g4-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g4-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g5-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv[g5-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g0-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g0-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g1-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g1-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g2-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g2-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g3-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g3-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g4-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g4-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g5-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_appnp_conv_e_weight[g5-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g0-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g0-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g1-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g1-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g2-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g2-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g3-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g3-idtype1] PASSED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g4-idtype0] PASSED [ 35%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g4-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g5-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[True-g5-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g0-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g0-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g1-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g1-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g2-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g2-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g3-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g3-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g4-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g4-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g5-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_gcn2conv_e_weight[False-g5-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g0-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g0-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g1-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g1-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g2-idtype0] PASSED 
[ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g2-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g3-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g3-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g4-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g4-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g5-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_sgconv_e_weight[g5-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g0-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g0-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g1-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g1-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g2-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g2-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g3-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g3-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g4-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g4-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g5-idtype0] PASSED [ 36%] tests/pytorch/test_nn.py::test_tagconv_e_weight[g5-idtype1] PASSED [ 36%] tests/pytorch/test_nn.py::test_gin_conv[mean-g0-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g0-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g1-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g1-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g2-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g2-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g3-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g3-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g4-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g4-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g5-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g5-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g6-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[mean-g6-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g0-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g0-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g1-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g1-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g2-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g2-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g3-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g3-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g4-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g4-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g5-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g5-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g6-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[max-g6-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g0-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g0-idtype1] PASSED [ 37%] 
tests/pytorch/test_nn.py::test_gin_conv[sum-g1-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g1-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g2-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g2-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g3-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g3-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g4-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g4-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g5-idtype0] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g5-idtype1] PASSED [ 37%] tests/pytorch/test_nn.py::test_gin_conv[sum-g6-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv[sum-g6-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g0-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g0-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g1-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g1-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g2-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g2-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g3-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g3-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g4-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g4-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g5-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g5-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g6-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g6-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g7-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gine_conv[g7-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g0-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g0-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g1-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[mean-g1-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[max-g0-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[max-g0-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[max-g1-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[max-g1-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g0-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g0-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g1-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_gin_conv_bi[sum-g1-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g0-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g0-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g1-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g1-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g2-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g2-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g3-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g3-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g4-idtype0] PASSED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g4-idtype1] PASSED [ 38%] tests/pytorch/test_nn.py::test_agnn_conv[g5-idtype0] PASSED [ 39%] 
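Similarly, the test_gin_conv[{mean,max,sum}-...] entries above sweep dgl.nn.GINConv's three neighbor aggregators around a learnable apply function. A minimal sketch under the same caveats (toy graph, illustrative sizes):

import torch
import dgl
from dgl.nn import GINConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))  # toy 3-node cycle
feat = torch.randn(g.num_nodes(), 8)
mlp = torch.nn.Linear(8, 4)  # the "apply function" applied after aggregation

for aggregator in ('mean', 'max', 'sum'):
    conv = GINConv(mlp, aggregator)
    out = conv(g, feat)  # shape: (num_nodes, 4)
    assert out.shape == (g.num_nodes(), 4)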
tests/pytorch/test_nn.py::test_agnn_conv[g5-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv[g6-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv[g6-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv_bi[g0-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv_bi[g0-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv_bi[g1-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_agnn_conv_bi[g1-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g0-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g0-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g1-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g1-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g2-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g2-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g3-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g3-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g4-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g4-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g5-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv[g5-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g0-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g0-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g1-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g1-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g2-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g2-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g3-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g3-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g4-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g4-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g5-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_gated_graph_conv_one_etype[g5-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g0-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g0-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g1-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g1-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g2-idtype0] PASSED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g2-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g3-idtype0] PASSED [ 39%] tests/compute/test_data.py::test_reddit PASSED [ 5%] tests/pytorch/test_nn.py::test_nn_conv[g3-idtype1] PASSED [ 39%] tests/pytorch/test_nn.py::test_nn_conv[g4-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_nn_conv[g4-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_nn_conv[g5-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_nn_conv[g5-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_nn_conv[g6-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_nn_conv[g6-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_nn_conv_bi[g0-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_nn_conv_bi[g0-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_nn_conv_bi[g1-idtype0] PASSED [ 40%]
tests/pytorch/test_nn.py::test_nn_conv_bi[g1-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g0-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g0-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g1-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g1-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g2-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g2-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g3-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g3-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g4-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g4-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g5-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv[g5-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv_bi[g0-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv_bi[g0-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv_bi[g1-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_gmm_conv_bi[g1-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-both-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-both-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-right-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-right-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-none-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g0-none-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-both-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-both-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-right-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-right-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-none-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g1-none-idtype1] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-both-idtype0] PASSED [ 40%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-both-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-right-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-right-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-none-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g2-none-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-both-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-both-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-right-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-right-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-none-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g3-none-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-both-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-both-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-right-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-right-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-none-idtype0] PASSED [ 41%] 
tests/pytorch/test_nn.py::test_dense_graph_conv[1-g4-none-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-both-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-both-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-right-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-right-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-none-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g5-none-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-both-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-both-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-right-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-right-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-none-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g6-none-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-both-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-both-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-right-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-right-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-none-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[1-g7-none-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-both-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-both-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-right-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-right-idtype1] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-none-idtype0] PASSED [ 41%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g0-none-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-both-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-both-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-right-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-right-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-none-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g1-none-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-both-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-both-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-right-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-right-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-none-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g2-none-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-both-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-both-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-right-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-right-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-none-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g3-none-idtype1] PASSED [ 42%] 
tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-both-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-both-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-right-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-right-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-none-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g4-none-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-both-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-both-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-right-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-right-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-none-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g5-none-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-both-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-both-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-right-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-right-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-none-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g6-none-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-both-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-both-idtype1] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-right-idtype0] PASSED [ 42%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-right-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-none-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_graph_conv[2-g7-none-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g0-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g0-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g1-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g1-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g2-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g2-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g3-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g3-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g4-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g4-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g5-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g5-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g6-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g6-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g7-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g7-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g8-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g8-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g9-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[1-g9-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g0-idtype0] PASSED [ 43%] 
tests/pytorch/test_nn.py::test_dense_sage_conv[2-g0-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g1-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g1-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g2-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g2-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g3-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g3-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g4-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g4-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g5-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g5-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g6-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g6-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g7-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g7-idtype1] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g8-idtype0] PASSED [ 43%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g8-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g9-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_dense_sage_conv[2-g9-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g0-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g0-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g1-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g1-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g2-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g2-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g3-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g3-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g4-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g4-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g5-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g5-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g6-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[1-g6-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g0-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g0-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g1-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g1-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g2-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g2-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g3-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g3-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g4-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g4-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g5-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g5-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g6-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv[2-g6-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[1-g0-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[1-g0-idtype1] PASSED [ 44%] 
tests/pytorch/test_nn.py::test_edge_conv_bi[1-g1-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[1-g1-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[2-g0-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[2-g0-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[2-g1-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_edge_conv_bi[2-g1-idtype1] PASSED [ 44%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g0-idtype0] PASSED [ 44%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g0-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g1-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g1-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g2-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g2-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g3-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g3-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g4-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g4-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g5-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g5-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g6-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-1-g6-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g0-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g0-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g1-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g1-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g2-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g2-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g3-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g3-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g4-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g4-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g5-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g5-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g6-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[1-2-g6-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g0-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g0-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g1-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g1-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g2-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g2-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g3-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g3-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g4-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g4-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g5-idtype0] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g5-idtype1] PASSED [ 45%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g6-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-1-g6-idtype1] PASSED [ 46%] 
tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g0-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g0-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g1-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g1-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g2-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g2-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g3-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g3-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g4-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g4-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g5-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g5-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g6-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv[4-2-g6-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g0-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g0-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g1-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-1-g1-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g0-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g0-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g1-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[1-2-g1-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g0-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g0-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g1-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-1-g1-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g0-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g0-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g1-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_dotgat_conv_bi[4-2-g1-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dense_cheb_conv[1] PASSED [ 46%] tests/pytorch/test_nn.py::test_dense_cheb_conv[2] PASSED [ 46%] tests/pytorch/test_nn.py::test_sequential PASSED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g0-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g0-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g1-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g1-idtype1] PASSED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g2-idtype0] PASSED [ 46%] tests/pytorch/test_nn.py::test_atomic_conv[g2-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g3-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g3-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g4-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g4-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g5-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_atomic_conv[g5-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g0-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g0-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g1-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g1-idtype1] PASSED [ 47%] 
tests/pytorch/test_nn.py::test_cf_conv[1-g2-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g2-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g3-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g3-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g4-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g4-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g5-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g5-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g6-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g6-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g7-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[1-g7-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g0-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g0-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g1-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g1-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g2-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g2-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g3-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g3-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g4-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g4-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g5-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g5-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g6-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g6-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g7-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_cf_conv[3-g7-idtype1] PASSED [ 47%] tests/pytorch/test_nn.py::test_hetero_conv[False-sum-idtype0] PASSED [ 47%] tests/pytorch/test_nn.py::test_hetero_conv[False-sum-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-max-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-max-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-min-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-min-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-mean-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-mean-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-stack-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-stack-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-myagg-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[False-myagg-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-sum-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-sum-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-max-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-max-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-min-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-min-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-mean-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-mean-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-stack-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-stack-idtype1] PASSED [ 48%] 
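The test_hetero_conv[...] entries above (and the myagg variants just below) sweep dgl.nn.HeteroGraphConv's cross-relation reducers: the built-in 'sum', 'max', 'min', 'mean', and 'stack', plus a user-supplied callable, which is presumably what the myagg ID refers to. A minimal sketch with an invented two-relation schema and illustrative sizes (not the suite's fixtures):

import torch
import dgl
from dgl.nn import HeteroGraphConv, GraphConv

# Toy heterograph; every destination node has at least one in-edge,
# so GraphConv's zero-in-degree check does not trip.
g = dgl.heterograph({
    ('user', 'follows', 'user'): (torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])),
    ('user', 'plays', 'game'): (torch.tensor([0, 2]), torch.tensor([0, 1])),
})
feats = {'user': torch.randn(3, 8), 'game': torch.randn(2, 8)}

def my_agg(tensors, dsttype):
    # Custom reducer: stack the per-relation outputs for this destination
    # node type and sum them (equivalent to aggregate='sum').
    return torch.stack(tensors, dim=0).sum(dim=0)

conv = HeteroGraphConv(
    {'follows': GraphConv(8, 4), 'plays': GraphConv(8, 4)},
    aggregate=my_agg,  # or one of 'sum', 'max', 'min', 'mean', 'stack'
)
out = conv(g, feats)  # dict: out['user'] is (3, 4), out['game'] is (2, 4)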
tests/pytorch/test_nn.py::test_hetero_conv[True-myagg-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_conv[True-myagg-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_linear[1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_linear[2] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_linear[100] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_embedding[1] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_embedding[2] PASSED [ 48%] tests/pytorch/test_nn.py::test_hetero_embedding[100] PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g0-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g0-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g1-idtype0] PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g1-idtype1] PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g2-idtype0] Cleaning workspace Fetching without tags PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g2-idtype1] Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g3-idtype0] Commit message: "fix for pytorch < 1.12" Cleaning workspace PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g3-idtype1] [Pipeline] withEnv [Pipeline] { [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g4-idtype0] + docker pull dgllib/dgl-ci-cpu:ssh_v220818 PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g4-idtype1] > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g5-idtype0] ssh_v220818: Pulling from dgllib/dgl-ci-cpu Digest: sha256:9c66e1df96cfae9cb9aa9ba39ef4921e152409d1447d6b123659ff4f84b4291f Status: Image is up to date for dgllib/dgl-ci-cpu:ssh_v220818 
docker.io/dgllib/dgl-ci-cpu:ssh_v220818 [Pipeline] } [Pipeline] // withEnv [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh PASSED [ 48%] tests/pytorch/test_nn.py::test_gnnexplainer[1-g5-idtype1] + docker inspect -f . dgllib/dgl-ci-cpu:ssh_v220818 . [Pipeline] } [Pipeline] // withEnv [Pipeline] withDockerContainer dgl-manual-large-cpu does not seem to be running inside a container $ docker run -t -d -u 0:0 --shm-size=4gb -w /root/jenkins/workspace/dgl_PR-4648 -v /root/jenkins/workspace/dgl_PR-4648:/root/jenkins/workspace/dgl_PR-4648:rw,z -v /root/jenkins/workspace/dgl_PR-4648@tmp:/root/jenkins/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-cpu:ssh_v220818 cat PASSED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g0-idtype0] PASSED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g0-idtype1] PASSED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g1-idtype0] PASSED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g1-idtype1] $ docker top 9d11908e8a1623fe2cb3ee8ecb53c2f89a04f190533b91026267498f2decd76b -eo pid,comm ERROR: The container started but didn't run the expected command. Please double check your ENTRYPOINT does execute the command passed as docker run argument, as required by official docker images (see https://github.com/docker-library/official-images#consistency for entrypoint consistency requirements). Alternatively you can force image entrypoint to be disabled by adding option `--entrypoint=''`. 
tests/pytorch/test_nn.py::test_gnnexplainer[2-g2-idtype0] PASSED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g2-idtype1] PASSED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g3-idtype0] PASSED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g3-idtype1] PASSED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g4-idtype0] PASSED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g4-idtype1] PASSED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g5-idtype0] PASSED [ 49%] tests/pytorch/test_nn.py::test_gnnexplainer[2-g5-idtype1] PASSED [ 49%] tests/pytorch/test_nn.py::test_jumping_knowledge PASSED [ 49%] tests/pytorch/test_nn.py::test_edge_predictor[dot] PASSED [ 49%] tests/pytorch/test_nn.py::test_edge_predictor[cos] PASSED [ 49%] tests/pytorch/test_nn.py::test_edge_predictor[ele] PASSED [ 49%] tests/pytorch/test_nn.py::test_edge_predictor[cat] PASSED [ 49%] tests/pytorch/test_nn.py::test_ke_score_funcs PASSED [ 49%] tests/pytorch/test_nn.py::test_twirls PASSED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[None-None-4] PASSED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[None-None-32] PASSED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[basis-4-4] PASSED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[basis-4-32] PASSED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[bdd-4-4] PASSED [ 49%] tests/pytorch/test_nn.py::test_typed_linear[bdd-4-32] PASSED [ 49%] tests/pytorch/test_nn.py::test_hgt[1-4-idtype0] PASSED [ 49%] tests/pytorch/test_nn.py::test_hgt[1-4-idtype1] PASSED [ 49%] tests/pytorch/test_nn.py::test_radius_graph[True-True] PASSED [ 49%] tests/pytorch/test_nn.py::test_radius_graph[True-False] PASSED [ 49%] tests/pytorch/test_nn.py::test_radius_graph[False-True] PASSED [ 49%] tests/pytorch/test_nn.py::test_radius_graph[False-False] PASSED [ 49%] tests/pytorch/test_nn.py::test_group_rev_res[idtype0] PASSED [ 49%] tests/pytorch/test_nn.py::test_group_rev_res[idtype1] PASSED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-16-16-16] PASSED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-16-16-32] PASSED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-16-32-16] PASSED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-16-32-32] PASSED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-32-16-16] PASSED [ 49%] tests/pytorch/test_nn.py::test_egnn_conv[16-32-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[16-32-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[16-32-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-16-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-16-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-16-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-16-32-32] PASSED [ 50%]
[Pipeline] { [Pipeline] stage [Pipeline] { (Distributed Torch CPU Unit test) [Pipeline] sh
+ rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials
[Pipeline] checkout
The recommended git tool is: git
using credential 150de63f-189c-4717-bcaf-010460d2f51a
Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@487ac80b; decorates RemoteLauncher[hudson.remoting.Channel@1d9b9638:dgl-manual-large-cpu] will be ignored (a typical symptom is the Git executable not being run inside a designated container)
Fetching changes from the remote Git repository
Cleaning workspace
Fetching without tags
Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea
Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648)
Commit message: "fix for pytorch < 1.12"
Cleaning workspace
> git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648/.git # timeout=10
> git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
Fetching upstream changes from https://github.com/dmlc/dgl.git
> git --version # timeout=10
> git --version # 'git version 2.17.1'
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git remote # timeout=10
> git config --get remote.origin.url # timeout=10
using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906
> git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10
> git rev-parse HEAD^{commit} # timeout=10
> git config core.sparsecheckout # timeout=10
> git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git branch -a -v --no-abbrev # timeout=10
> git branch -D PR-4648 # timeout=10
> git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10
> git rev-parse --verify HEAD # timeout=10
Resetting working tree
> git reset --hard # timeout=10
> git clean -fdx # timeout=10
[Pipeline] sh
+ git submodule update --recursive --init
Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS'
Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack'
Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core'
Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest'
Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm'
Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann'
Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl'
Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap'
Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe'
Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust'
Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm'
Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak'
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dlpack'...
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'...
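Note: the `git submodule update --recursive --init` step above registers and clones each pinned third_party dependency from the URLs recorded in .gitmodules. For a local checkout, standard git gives the same result in one step (a usage sketch, not a command from this log):

    git clone --recurse-submodules https://github.com/dmlc/dgl.git
    # or, inside an existing clone:
    git submodule update --init --recursive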
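Note: in the pytest output that follows, each bracketed suffix such as [True-16-1-0.0-2.5-scalers0-aggregators0-16-16] is a parametrize ID, and pytest accepts the full node ID on the command line, so any single case from these sweeps can be re-run in isolation (a sketch, assuming a local DGL build with the test dependencies installed):

    python3 -m pytest -v "tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators0-16-16]"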
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/googletest'...
tests/pytorch/test_nn.py::test_egnn_conv[10-32-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-32-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-32-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[10-32-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-16-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-16-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-16-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-16-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-32-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-32-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-32-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_egnn_conv[0-32-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 50%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 51%]
tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 51%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 51%] 
tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 52%] 
tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 52%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 53%] 
tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 53%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 54%] 
Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/libxsmm'...
tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 54%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-16-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 55%]
tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 55%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 56%] 
tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 56%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 57%] 
tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 57%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 58%] 
tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 58%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 58%] 
tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 59%] 
tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 59%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[True-0-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 60%] 
tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 60%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 61%] 
tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 61%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 62%] 
tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 62%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 63%] 
tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 63%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 64%] 
tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-16-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 64%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators1-16-16] 
PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 65%] 
tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 65%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 66%] 
tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 66%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 67%] 
tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 67%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 68%] 
tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 68%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 69%] 
tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 69%] tests/pytorch/test_nn.py::test_pna_conv[False-0-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.0-3] PASSED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.0-5] PASSED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.5-3] PASSED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-0.5-5] PASSED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-1.0-3] PASSED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-sym-1.0-5] PASSED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.0-3] PASSED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.0-5] PASSED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.5-3] PASSED [ 69%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-0.5-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-1.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-True-row-1.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.5-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-0.5-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-1.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-sym-1.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.5-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-0.5-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-1.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-True-False-row-1.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.5-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-0.5-5] PASSED [ 70%] 
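The test_pna_conv matrix completed above sweeps PNAConv over combinations of residual, edge-feature size, tower count, dropout, delta, scaler set, aggregator set, and input/output sizes. Below is a minimal sketch of the kind of call this parametrization exercises, using the dgl.nn.pytorch.PNAConv API; the mapping from test-id fields to constructor arguments is inferred from the ids, not stated in this log, and the toy graph and feature values are illustrative only.

    import dgl
    import torch
    from dgl.nn.pytorch import PNAConv

    # Toy directed graph with 6 nodes and a handful of edges.
    g = dgl.graph(([0, 1, 2, 3, 2, 5], [1, 2, 3, 4, 0, 3]))
    feat = torch.randn(6, 16)

    # The 16->32 sizes and delta=2.5 mirror values visible in the test ids;
    # the matrix also sweeps dropout {0.0, 0.1}, num_towers {1, 4}, residual
    # on/off, and an optional 16-dim edge feature (edge_feat_size).
    conv = PNAConv(
        in_size=16,
        out_size=32,
        aggregators=["mean", "max", "sum"],
        scalers=["identity", "amplification"],
        delta=2.5,
    )
    out = conv(g, feat)
    print(out.shape)  # torch.Size([6, 32])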
tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-1.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-sym-1.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.5-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-0.5-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-1.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-True-row-1.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.5-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-0.5-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-1.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-sym-1.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.5-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-0.5-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-1.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[True-False-False-row-1.0-5] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.0-3] PASSED [ 70%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.5-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-0.5-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-1.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-sym-1.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.5-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-0.5-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-1.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-True-row-1.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.5-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-0.5-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-1.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-sym-1.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.5-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-0.5-5] PASSED [ 71%] 
tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-1.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-True-False-row-1.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.5-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-0.5-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-1.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-sym-1.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.5-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-0.5-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-1.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-True-row-1.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.0-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.5-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-0.5-5] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-1.0-3] PASSED [ 71%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-sym-1.0-5] PASSED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.0-3] PASSED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.0-5] PASSED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.5-3] PASSED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-0.5-5] PASSED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-1.0-3] PASSED [ 72%] tests/pytorch/test_nn.py::test_label_prop[False-False-False-row-1.0-5] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 72%] 
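The test_label_prop block completed above varies the propagation depth (3/5), a mixing coefficient (0.0/0.5/1.0), the normalization ('sym'/'row'), and three boolean options. A minimal sketch of such a run, assuming these correspond to dgl.nn.pytorch.LabelPropagation's k, alpha, norm_type, and flags such as clamp/normalize (the id-to-argument mapping is an assumption; the graph, labels, and mask below are illustrative only):

    import dgl
    import torch
    from dgl.nn.pytorch import LabelPropagation

    # Toy graph with partial labels; mask marks nodes whose labels are known.
    g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
    labels = torch.tensor([0, 1, 1, 0])
    mask = torch.tensor([True, True, False, False])

    # k=3, alpha=0.5, norm_type='sym' mirror values visible in the test ids.
    lp = LabelPropagation(k=3, alpha=0.5, norm_type="sym")
    with torch.no_grad():
        soft_labels = lp(g, labels, mask)  # (num_nodes, num_classes)
    print(soft_labels.shape)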
tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 72%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 73%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 73%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 74%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 74%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 75%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 75%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 76%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 76%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-16-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 76%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 77%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 77%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 78%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 78%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 79%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 79%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 80%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 80%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 81%] 
tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 81%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 82%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 82%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 83%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 83%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 83%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 84%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 84%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators0-32-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nanoflann'... PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators2-16-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nccl'... 
PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators2-16-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/phmap'... PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 85%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 86%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators2-16-32] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'... PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators2-16-32] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust'... PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-16-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 86%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators0-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 87%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 87%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 87%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 88%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 88%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-1-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators0-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators0-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators0-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators0-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators1-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators1-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators1-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators1-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators2-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators2-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators2-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers0-aggregators2-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators0-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators0-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators0-32-16] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm'... 
PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators0-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators1-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators1-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators1-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators1-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators2-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators2-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators2-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-2.5-scalers1-aggregators2-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators0-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators0-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators0-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators0-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators1-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators1-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators1-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators1-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators2-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators2-16-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators2-32-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers0-aggregators2-32-32] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators0-16-16] PASSED [ 89%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators0-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators0-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators0-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators1-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators1-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators1-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators1-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators2-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators2-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators2-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.0-7.4-scalers1-aggregators2-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators0-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators0-16-32] PASSED [ 90%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators0-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators0-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators1-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators1-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators1-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators1-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators2-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators2-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators2-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers0-aggregators2-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators0-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators0-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators0-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators0-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators1-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators1-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators1-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators1-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators2-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators2-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators2-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-2.5-scalers1-aggregators2-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators0-16-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators0-16-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators0-32-16] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators0-32-32] PASSED [ 90%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators1-16-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators1-16-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators1-32-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators1-32-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators2-16-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators2-16-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators2-32-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers0-aggregators2-32-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators0-16-16] PASSED [ 91%] 
tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators0-16-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators0-32-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators0-32-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators1-16-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators1-16-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators1-32-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators1-32-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators2-16-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators2-16-32] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators2-32-16] PASSED [ 91%] tests/pytorch/test_nn.py::test_dgn_conv[False-0-4-0.1-7.4-scalers1-aggregators2-32-32] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam[1] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam[4] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam[101] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam[1024] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[1-False] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[1-True] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[1-None] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[4-False] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[4-True] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[4-None] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[101-False] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[101-True] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[101-None] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[1024-False] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[1024-True] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_uva[1024-None] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_dtype[1-dtype0] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_dtype[1-dtype1] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_dtype[4-dtype0] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_dtype[4-dtype1] PASSED [ 91%] tests/pytorch/test_optim.py::test_sparse_adam_dtype[101-dtype0] PASSED [ 92%] tests/pytorch/test_optim.py::test_sparse_adam_dtype[101-dtype1] PASSED [ 92%] tests/pytorch/test_optim.py::test_sparse_adam_dtype[1024-dtype0] PASSED [ 92%] tests/pytorch/test_optim.py::test_sparse_adam_dtype[1024-dtype1] PASSED [ 92%] tests/pytorch/test_optim.py::test_sparse_adam_zero_step PASSED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_cpu_sparse_adam[2] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_cpu_sparse_adam[4] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[nccl-2] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[nccl-4] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[nccl-8] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[gloo-2] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[gloo-4] SKIPPED [ 92%] 
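The test_dgn_conv sweep that finishes above covers dgl.nn.DGNConv over every combination of residual (True/False), edge-feature size (16/0), tower count (1/4), dropout (0.0/0.1), delta (2.5/7.4), two scaler fixtures, three aggregator fixtures, and 16/32 input/output sizes. As a minimal sketch of one such parametrization — the aggregator and scaler lists below are illustrative stand-ins for the scalersN/aggregatorsN fixtures defined in tests/pytorch/test_nn.py, and the random graph stands in for the test fixture:

import torch
import dgl
from dgl.nn import DGNConv

g = dgl.rand_graph(100, 500)           # stand-in for the test graph
feat = torch.randn(g.num_nodes(), 16)

# Roughly the [True-0-4-0.1-2.5-...-16-16] case: residual=True,
# edge_feat_size=0, num_towers=4, dropout=0.1, delta=2.5, 16 -> 16.
conv = DGNConv(
    in_size=16,
    out_size=16,
    aggregators=['mean', 'max', 'sum'],     # stand-in aggregator set
    scalers=['identity', 'amplification'],  # stand-in scaler set
    delta=2.5,
    dropout=0.1,
    num_towers=4,   # must evenly divide both in_size and out_size
    residual=True,
)
out = conv(g, feat)  # shape (100, 16)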
tests/pytorch/test_optim.py::test_multiprocess_sparse_adam[gloo-8] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_cuda_tensor[2] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_cuda_tensor[4] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_cuda_tensor[8] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_cpu_zero_step[2] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_cpu_zero_step[4] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[nccl-2] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[nccl-4] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[nccl-8] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[gloo-2] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[gloo-4] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step[gloo-8] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step_cuda_tensor[2] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step_cuda_tensor[4] SKIPPED [ 92%] tests/pytorch/test_optim.py::test_multiprocess_sparse_adam_zero_step_cuda_tensor[8] SKIPPED [ 92%] tests/pytorch/test_pickle.py::test_pickling_batched_graph PASSED [ 92%] tests/pytorch/test_pin_memory.py::test_pin_noncontiguous PASSED [ 92%] tests/pytorch/test_pin_memory.py::test_pin_view PASSED [ 92%] tests/pytorch/test_pin_memory.py::test_unpin_automatically PASSED [ 92%] tests/pytorch/test_pin_memory.py::test_pin_unpin_column PASSED [ 92%] tests/pytorch/test_pin_memory.py::test_pin_empty PASSED [ 92%] tests/pytorch/test_sparse_emb.py::test_multiprocess_sparse_emb_get_set[1] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/xbyak'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'... 
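The test_sparse_adam family just above drives dgl.optim.SparseAdam over embedding tables of 1, 4, 101 and 1024 rows, with UVA on/off/auto and two value dtypes; every multiprocess and nccl/gloo variant is skipped in this run. A minimal single-process sketch of the NodeEmbedding + SparseAdam pairing these tests exercise — the table size, name and learning rate here are illustrative:

import torch
import dgl

emb = dgl.nn.NodeEmbedding(101, 8, name='pr4648_demo')  # 101 rows mirrors one tested size
opt = dgl.optim.SparseAdam(params=[emb], lr=0.01)

ids = torch.tensor([0, 3, 100])
vals = emb(ids)     # gather a few rows; DGL records the sparse trace
loss = vals.sum()
loss.backward()
opt.step()          # sparse Adam update touches only the gathered rows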
Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'... PASSED [ 92%] tests/pytorch/test_sparse_emb.py::test_multiprocess_sparse_emb_get_set[2] SKIPPED [ 92%] tests/pytorch/test_sparse_emb.py::test_multiprocess_sparse_emb_get_set[3] SKIPPED [ 92%] tests/pytorch/test_stream.py::test_basics PASSED [ 92%] tests/pytorch/test_stream.py::test_set_get_stream PASSED [ 92%] tests/pytorch/test_stream.py::test_record_stream_ndarray PASSED [ 92%] tests/pytorch/test_stream.py::test_record_stream_graph_positive Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'... PASSED [ 92%] tests/pytorch/test_stream.py::test_record_stream_graph_negative PASSED [ 93%] tests/pytorch/test_unified_tensor.py::test_unified_tensor PASSED [ 93%] tests/pytorch/test_unified_tensor.py::test_multi_gpu_unified_tensor[1] Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'... Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'... 
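The test_pin_memory cases in the preceding chunk (pin_noncontiguous, pin_view, unpin_automatically, pin_unpin_column, pin_empty) revolve around page-locking graph data in place so CUDA kernels can read host memory directly. A rough sketch of that in-place pinning workflow, assuming a CUDA-capable machine:

import torch
import dgl

g = dgl.rand_graph(100, 500)
g.ndata['feat'] = torch.randn(g.num_nodes(), 16)

if torch.cuda.is_available():
    g.pin_memory_()        # pin graph structure and data in place
    assert g.is_pinned()   # graph stays on CPU but is zero-copy accessible from GPU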
Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'... PASSED [ 93%] tests/pytorch/test_unified_tensor.py::test_multi_gpu_unified_tensor[2] SKIPPED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_diag[None-val_shape0] PASSED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_diag[None-val_shape1] PASSED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_diag[mat_shape1-val_shape0] PASSED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_diag[mat_shape1-val_shape1] PASSED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_diag[mat_shape2-val_shape0] PASSED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_diag[mat_shape2-val_shape1] PASSED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_identity[None-shape0] PASSED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_identity[None-shape1] PASSED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_identity[None-shape2] PASSED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_identity[2-shape0] PASSED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_identity[2-shape1] PASSED [ 93%] tests/pytorch/mock_sparse/test_diag.py::test_identity[2-shape2] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[add-dtype0-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[add-dtype0-idtype1] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[add-dtype1-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[add-dtype1-idtype1] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[sub-dtype0-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[sub-dtype0-idtype1] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[sub-dtype1-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[sub-dtype1-idtype1] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[mul-dtype0-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[mul-dtype0-idtype1] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[mul-dtype1-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[mul-dtype1-idtype1] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[truediv-dtype0-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[truediv-dtype0-idtype1] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[truediv-dtype1-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_sparse[truediv-dtype1-idtype1] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[add-dtype0-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[add-dtype0-idtype1] PASSED [ 93%] 
tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[add-dtype1-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[add-dtype1-idtype1] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[sub-dtype0-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[sub-dtype0-idtype1] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[sub-dtype1-idtype0] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[sub-dtype1-idtype1] PASSED [ 93%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[mul-dtype0-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[mul-dtype0-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[mul-dtype1-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[mul-dtype1-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[truediv-dtype0-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[truediv-dtype0-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[truediv-dtype1-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_diag[truediv-dtype1-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2-dtype0-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2-dtype0-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2-dtype1-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2-dtype1-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2.5-dtype0-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2.5-dtype0-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2.5-dtype1-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_sparse_op_scalar[2.5-dtype1-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2-dtype0-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2-dtype0-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2-dtype1-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2-dtype1-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2.5-dtype0-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2.5-dtype0-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2.5-dtype1-idtype0] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_scalar_op_sparse[2.5-dtype1-idtype1] PASSED [ 94%] tests/pytorch/mock_sparse/test_elementwise_op_sp.py::test_expose_op PASSED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col0-row0-None] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col0-row0-2] SKIPPED [ 94%] 
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col0-row1-None] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col0-row1-2] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col1-row0-None] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col1-row0-2] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col1-row1-None] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape0-col1-row1-2] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col0-row0-None] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col0-row0-2] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col0-row1-None] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col0-row1-2] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col1-row0-None] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col1-row0-2] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col1-row1-None] SKIPPED [ 94%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-sum-extra_shape1-col1-row1-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col0-row0-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col0-row0-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col0-row1-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col0-row1-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col1-row0-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col1-row0-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col1-row1-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape0-col1-row1-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col0-row0-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col0-row0-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col0-row1-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col0-row1-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col1-row0-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col1-row0-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col1-row1-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smax-extra_shape1-col1-row1-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col0-row0-None] SKIPPED [ 95%] 
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col0-row0-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col0-row1-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col0-row1-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col1-row0-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col1-row0-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col1-row1-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape0-col1-row1-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col0-row0-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col0-row0-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col0-row1-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col0-row1-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col1-row0-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col1-row0-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col1-row1-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smin-extra_shape1-col1-row1-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col0-row0-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col0-row0-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col0-row1-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col0-row1-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col1-row0-None] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col1-row0-2] SKIPPED [ 95%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col1-row1-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape0-col1-row1-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col0-row0-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col0-row0-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col0-row1-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col0-row1-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col1-row0-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col1-row0-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col1-row1-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[None-smean-extra_shape1-col1-row1-2] SKIPPED [ 96%] 
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col0-row0-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col0-row0-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col0-row1-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col0-row1-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col1-row0-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col1-row0-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col1-row1-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape0-col1-row1-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col0-row0-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col0-row0-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col0-row1-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col0-row1-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col1-row0-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col1-row0-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col1-row1-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-sum-extra_shape1-col1-row1-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col0-row0-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col0-row0-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col0-row1-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col0-row1-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col1-row0-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col1-row0-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col1-row1-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape0-col1-row1-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col0-row0-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col0-row0-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col0-row1-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col0-row1-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col1-row0-None] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col1-row0-2] SKIPPED [ 96%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col1-row1-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smax-extra_shape1-col1-row1-2] SKIPPED [ 97%] 
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col0-row0-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col0-row0-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col0-row1-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col0-row1-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col1-row0-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col1-row0-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col1-row1-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape0-col1-row1-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col0-row0-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col0-row0-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col0-row1-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col0-row1-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col1-row0-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col1-row0-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col1-row1-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smin-extra_shape1-col1-row1-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col0-row0-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col0-row0-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col0-row1-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col0-row1-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col1-row0-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col1-row0-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col1-row1-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape0-col1-row1-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col0-row0-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col0-row0-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col0-row1-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col0-row1-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col1-row0-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col1-row0-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col1-row1-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[0-smean-extra_shape1-col1-row1-2] SKIPPED [ 97%] 
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col0-row0-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col0-row0-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col0-row1-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col0-row1-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col1-row0-None] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col1-row0-2] SKIPPED [ 97%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col1-row1-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape0-col1-row1-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col0-row0-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col0-row0-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col0-row1-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col0-row1-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col1-row0-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col1-row0-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col1-row1-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-sum-extra_shape1-col1-row1-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col0-row0-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col0-row0-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col0-row1-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col0-row1-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col1-row0-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col1-row0-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col1-row1-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape0-col1-row1-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col0-row0-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col0-row0-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col0-row1-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col0-row1-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col1-row0-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col1-row0-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col1-row1-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smax-extra_shape1-col1-row1-2] SKIPPED [ 98%] 
tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col0-row0-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col0-row0-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col0-row1-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col0-row1-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col1-row0-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col1-row0-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col1-row1-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape0-col1-row1-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col0-row0-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col0-row0-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col0-row1-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col0-row1-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col1-row0-None] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col1-row0-2] SKIPPED [ 98%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col1-row1-None] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smin-extra_shape1-col1-row1-2] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col0-row0-None] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col0-row0-2] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col0-row1-None] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col0-row1-2] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col1-row0-None] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col1-row0-2] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col1-row1-None] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape0-col1-row1-2] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col0-row0-None] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col0-row0-2] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col0-row1-None] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col0-row1-2] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col1-row0-None] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col1-row0-2] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col1-row1-None] SKIPPED [ 99%] tests/pytorch/mock_sparse/test_reduction.py::test_reduction[1-smean-extra_shape1-col1-row1-2] SKIPPED [ 99%] 
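The long runs of bracketed test_reduction variants above are the product of stacked pytest parameterization, and the whole matrix lands as SKIPPED because the suite's skip condition holds on this worker. A minimal, hypothetical sketch of the mechanism (not a test from the DGL tree, and the skip reason is an assumption):

import pytest
import torch

# Each combination of parametrize values becomes one bracketed test id; the
# skipif mark turns the entire generated matrix into SKIPPED rows whenever
# the condition holds (for example, no CUDA device on a CPU-only worker).
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires a GPU")
@pytest.mark.parametrize("op", ["sum", "smax", "smin", "smean"])
@pytest.mark.parametrize("dim", [None, 0, 1])
def test_reduction_sketch(dim, op):
    ...  # 4 ops x 3 dims -> 12 generated variants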
tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[None-val_shape0] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[None-val_shape1] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[mat_shape1-val_shape0] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[mat_shape1-val_shape1] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[mat_shape2-val_shape0] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_diag_matrix_transpose[mat_shape2-val_shape1] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col0-row0-None] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col0-row0-2] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col0-row1-None] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col0-row1-2] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col1-row0-None] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col1-row0-2] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col1-row1-None] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape0-col1-row1-2] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col0-row0-None] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col0-row0-2] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col0-row1-None] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col0-row1-2] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col1-row0-None] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col1-row0-2] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col1-row1-None] PASSED [ 99%] tests/pytorch/mock_sparse/test_transpose.py::test_sparse_matrix_transpose[extra_shape1-col1-row1-2] PASSED [100%] =============================== warnings summary =============================== python/dgl/backend/pytorch/tensor.py:16 python/dgl/backend/pytorch/tensor.py:16 /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:16: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(th.__version__) < LooseVersion("1.9.0"): python/dgl/backend/pytorch/tensor.py:340 python/dgl/backend/pytorch/tensor.py:340 /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:340: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(th.__version__) >= LooseVersion("1.10.0"): python/dgl/dataloading/dataloader.py:33 /root/jenkins/workspace/dgl_PR-4648/python/dgl/dataloading/dataloader.py:33: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. PYTORCH_VER = LooseVersion(torch.__version__)
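Every distutils DeprecationWarning in this summary flags the same pattern: torch version gates built on LooseVersion. A minimal sketch of the migration the warning text asks for, using packaging.version; illustrative only, not the actual change in this PR:

from packaging import version
import torch as th

# Sketch of the packaging.version replacement suggested by the warnings;
# it mirrors the flagged pattern rather than reproducing real DGL code.
PYTORCH_VER = version.parse(th.__version__)  # PEP 440 aware, handles "1.9.0+cpu"
if PYTORCH_VER >= version.parse("1.10.0"):
    HAS_STABLE_SORT = True   # torch.sort(..., stable=True) usable
else:
    HAS_STABLE_SORT = False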
python/dgl/_dataloading/pytorch/dataloader.py:23 /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:23: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. PYTORCH_VER = LooseVersion(th.__version__) python/dgl/_dataloading/pytorch/dataloader.py:24 /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:24: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. PYTORCH_16 = PYTORCH_VER >= LooseVersion("1.6.0") python/dgl/_dataloading/pytorch/dataloader.py:25 /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:25: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. PYTORCH_17 = PYTORCH_VER >= LooseVersion("1.7.0") tests/pytorch/test_dataloader.py::test_graph_dataloader /root/jenkins/workspace/dgl_PR-4648/python/dgl/data/minigc.py:159: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations self.labels = F.tensor(np.array(self.labels).astype(np.int)) tests/pytorch/test_dataloader.py: 10 warnings /root/jenkins/workspace/dgl_PR-4648/python/dgl/dataloading/dataloader.py:863: DGLWarning: Dataloader CPU affinity opt is not enabled, consider switching it on (see enable_cpu_affinity() or CPU best practices for DGL [https://docs.dgl.ai/tutorials/cpu/cpu_best_practises.html]) dgl_warning(f'Dataloader CPU affinity opt is not enabled, consider switching it on ' tests/pytorch/test_dataloader.py: 700 warnings /root/jenkins/workspace/dgl_PR-4648/python/dgl/dataloading/dataloader.py:78: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if PYTORCH_VER >= LooseVersion("1.10.0"): tests/pytorch/test_dataloader.py: 128 warnings /root/jenkins/workspace/dgl_PR-4648/python/dgl/dataloading/dataloader.py:83: DGLWarning: The current output_nodes are out of order even if set shuffle to False in Dataloader, the reason is that the current version of torch does not support stable sort. Please update torch to 1.10.0 or higher to fix it. 'The current output_nodes are out of order even if set shuffle ' tests/pytorch/test_nn.py: 11 warnings tests/pytorch/test_pickle.py: 1 warning /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`. dgl_warning('Recommend creating graphs by `dgl.graph(data)`' tests/pytorch/test_nn.py::test_set_trans /root/jenkins/workspace/dgl_PR-4648/python/dgl/nn/pytorch/glob.py:700: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor). lengths_x = th.tensor(lengths_x, dtype=th.int64, device=device) tests/pytorch/test_nn.py::test_set_trans /root/jenkins/workspace/dgl_PR-4648/python/dgl/nn/pytorch/glob.py:701: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor). lengths_mem = th.tensor(lengths_mem, dtype=th.int64, device=device)
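The two glob.py UserWarnings above recommend clone().detach() over torch.tensor() when the source is already a tensor. A minimal sketch of the recommended rewrite; illustrative, not the actual fix to glob.py:

import torch as th

lengths = th.arange(5)                        # already a tensor
bad = th.tensor(lengths, dtype=th.int64)      # triggers the copy-construct warning
good = lengths.clone().detach().to(th.int64)  # recommended form, no warning
assert th.equal(bad, good)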
tests/pytorch/test_nn.py::test_dense_cheb_conv[1] tests/pytorch/test_nn.py::test_dense_cheb_conv[2] /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly'] are deprecated in v0.5, and can be safely removed in all cases. ' removed in all cases.'.format(list(deprecate_kwargs.keys()))) -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html -- generated xml file: /root/jenkins/workspace/dgl_PR-4648/pytest_backend.xml -- ============================ slowest 100 durations ============================= 25.71s call tests/pytorch/test_dist_optim.py::test_sparse_opt 5.17s call tests/pytorch/test_unified_tensor.py::test_multi_gpu_unified_tensor[1] 5.11s call tests/pytorch/test_sparse_emb.py::test_multiprocess_sparse_emb_get_set[1] 2.52s call tests/pytorch/test_dataloader.py::test_cluster_gcn[0] 1.75s call tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-None-False] 1.45s call tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler0-None-False] 1.31s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor-idtype0] 1.30s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-full-idtype0] 1.28s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-full-idtype1] 1.25s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor2-idtype1] 1.21s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor2-idtype0] 1.20s call tests/pytorch/test_dataloader.py::test_node_dataloader[True-cpu-neighbor-idtype1] 1.18s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-full-idtype0] 1.14s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor-idtype1] 1.12s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor2-idtype1] 1.10s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-full-idtype1] 1.07s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor2-idtype0] 1.07s call tests/pytorch/test_ipc.py::test_torch_ipc 1.04s call tests/pytorch/test_dataloader.py::test_node_dataloader[False-cpu-neighbor-idtype0] 0.98s call tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-cpu-idtype0] 0.82s call tests/pytorch/test_dataloader.py::test_saint[edge-4] 0.81s call tests/pytorch/test_dataloader.py::test_saint[walk-4] 0.80s call tests/pytorch/test_dataloader.py::test_saint[node-4] 0.77s call tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-cpu-idtype0] 0.74s call tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[True-cpu-idtype1] 0.70s call tests/pytorch/test_dataloader.py::test_cluster_gcn[4] 0.69s call tests/pytorch/test_dataloader.py::test_shadow[4] 0.63s call tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-cpu-idtype1] 0.53s call tests/pytorch/test_dataloader.py::test_saint[edge-0] 0.49s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g0-idtype0] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[2-g1-idtype1] 0.48s call
tests/pytorch/test_nn.py::test_gnnexplainer[2-g2-idtype1] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[2-g2-idtype0] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[2-g5-idtype1] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[2-g1-idtype0] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[2-g3-idtype1] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[2-g0-idtype1] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[2-g4-idtype1] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g1-idtype0] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[2-g4-idtype0] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[2-g5-idtype0] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g2-idtype0] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[2-g0-idtype0] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[2-g3-idtype0] 0.48s call tests/pytorch/test_dataloader.py::test_saint[walk-0] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g5-idtype0] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g4-idtype0] 0.48s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g3-idtype0] 0.47s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g0-idtype1] 0.47s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g2-idtype1] 0.47s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g1-idtype1] 0.47s call tests/pytorch/test_dataloader.py::test_shadow[0] 0.47s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g5-idtype1] 0.47s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g4-idtype1] 0.47s call tests/pytorch/test_nn.py::test_gnnexplainer[1-g3-idtype1] 0.45s call tests/pytorch/test_dataloader.py::test_saint[node-0] 0.32s call tests/pytorch/test_dataloader.py::test_neighbor_nonuniform[False-pure_gpu-idtype0] 0.27s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-full-idtype0] 0.27s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-full-idtype0] 0.26s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-full-idtype0] 0.26s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-full-idtype1] 0.26s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-full-idtype1] 0.24s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-full-idtype0] 0.24s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-full-idtype0] 0.24s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-full-idtype1] 0.23s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-full-idtype1] 0.22s call tests/pytorch/test_nn.py::test_graph_conv[1-True-True-none-g0-idtype0] 0.22s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-full-idtype1] 0.22s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-full-idtype0] 0.21s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-full-idtype1] 0.19s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-neighbor-idtype0] 0.19s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler1-neighbor-idtype1] 0.18s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-neighbor-idtype0] 0.18s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-neighbor-idtype0] 0.17s call 
tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler2-neighbor-idtype1] 0.17s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-neighbor-idtype1] 0.17s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-cpu-neg_sampler0-neighbor-idtype0] 0.16s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-neighbor-idtype0] 0.15s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler2-neighbor-idtype1] 0.15s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler1-neighbor-idtype1] 0.14s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-neighbor-idtype1] 0.14s call tests/pytorch/test_dataloader.py::test_edge_dataloader[False-cpu-neg_sampler0-neighbor-idtype0] 0.14s call tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-reverse_types-True] 0.12s call tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[50-sampler1-reverse_types-False] 0.12s call tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-reverse_types-True] 0.12s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-full-idtype0] 0.11s call tests/pytorch/test_dataloader.py::test_edge_dataloader_excludes[1-sampler1-reverse_types-False] 0.10s call tests/pytorch/test_unified_tensor.py::test_unified_tensor 0.10s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-full-idtype1] 0.10s call tests/pytorch/test_nn.py::test_dgn_conv[True-0-4-0.1-2.5-scalers0-aggregators0-16-16] 0.09s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler2-neighbor-idtype0] 0.09s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-neighbor-idtype1] 0.09s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-uva-neg_sampler0-neighbor-idtype0] 0.09s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler0-neighbor-idtype1] 0.09s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler2-full-idtype0] 0.09s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-full-idtype1] 0.09s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-full-idtype0] 0.09s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-neighbor-idtype0] 0.09s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler2-full-idtype1] 0.09s call tests/pytorch/test_dataloader.py::test_edge_dataloader[True-pure_gpu-neg_sampler1-neighbor-idtype1] ========= 3745 passed, 233 skipped, 863 warnings in 134.40s (0:02:14) ========== [Pipeline] } [Pipeline] // timeout [Pipeline] } [Pipeline] // stage [Pipeline] stage [Pipeline] { (Torch GPU Example test) [Pipeline] sh Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' + rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks build cluster_gcn.pkl cmake conda cora_full cora_full.zip dataset dglgo docker docs examples featgraph include minigc ogbg-molhiv-as-graphpred ogbl-collab-as-linkpred ogbn-arxiv-as-nodepred optim_ip_config.txt pyproject.toml pytest_backend.xml pytest_compute.xml python readthedocs.yml src tensoradapter tests third_party tools tutorials Submodule path 'third_party/tvm': checked out 
'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'... [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@61a0ca14; decorates RemoteLauncher[hudson.remoting.Channel@41a64267:dglci-manual-gpu-worker] will be ignored (a typical symptom is the Git executable not being run inside a designated container) Fetching changes from the remote Git repository Cleaning workspace Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'... Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace > git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648/.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'... 
[Pipeline] sh + git submodule update --recursive --init Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] unstash [Pipeline] echo Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-cpu-linux [Pipeline] timeout Timeout set to expire in 30 min [Pipeline] { [Pipeline] sh Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' + bash tests/scripts/task_distributed_test.sh pytorch cpu Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' Requirement already satisfied: pytest in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (7.1.2) Collecting psutil Downloading psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (281 kB) Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] unstash ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 281.3/281.3 
kB 8.5 MB/s eta 0:00:00 Collecting pyyaml Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 596.3/596.3 kB 32.7 MB/s eta 0:00:00 Collecting pydantic Downloading pydantic-1.10.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (11.8 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 11.8/11.8 MB 84.0 MB/s eta 0:00:00 Requirement already satisfied: pandas in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (1.1.5) Collecting rdflib Downloading rdflib-6.2.0-py3-none-any.whl (500 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 500.3/500.3 kB 48.0 MB/s eta 0:00:00 Requirement already satisfied: ogb in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (1.3.3) Collecting filelock Downloading filelock-3.8.0-py3-none-any.whl (10 kB) Collecting pyarrow Downloading pyarrow-9.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (35.3 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 35.3/35.3 MB 53.0 MB/s eta 0:00:00 Requirement already satisfied: pluggy<2.0,>=0.12 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (1.0.0) Requirement already satisfied: attrs>=19.2.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (22.1.0) Requirement already satisfied: iniconfig in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (1.1.1) Requirement already satisfied: packaging in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (21.3) Requirement already satisfied: tomli>=1.0.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (2.0.1) Requirement already satisfied: py>=1.8.2 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (1.11.0) Requirement already satisfied: importlib-metadata>=0.12 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pytest) (4.12.0) Requirement already satisfied: typing-extensions>=4.1.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pydantic) (4.3.0) Requirement already satisfied: numpy>=1.15.4 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pandas) (1.21.6) Requirement already satisfied: pytz>=2017.2 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pandas) (2022.2.1) Requirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from pandas) (2.8.2) Requirement already satisfied: pyparsing in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from rdflib) (3.0.9) Requirement already satisfied: setuptools in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from rdflib) (61.2.0) Collecting isodate Downloading isodate-0.6.1-py2.py3-none-any.whl (41 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 41.7/41.7 kB 7.1 MB/s eta 0:00:00 Requirement already satisfied: torch>=1.6.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.9.0+cpu) Requirement already satisfied: tqdm>=4.29.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (4.64.0) Requirement already satisfied: six>=1.12.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.16.0) Requirement already satisfied: scikit-learn>=0.20.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.0.2) Requirement already satisfied: urllib3>=1.24.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (1.26.11) Requirement already satisfied: outdated>=0.2.0 in 
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from ogb) (0.2.1) Requirement already satisfied: zipp>=0.5 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from importlib-metadata>=0.12->pytest) (3.8.1) Requirement already satisfied: requests in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from outdated>=0.2.0->ogb) (2.28.1) Requirement already satisfied: littleutils in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from outdated>=0.2.0->ogb) (0.2.2) Requirement already satisfied: threadpoolctl>=2.0.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (3.1.0) Requirement already satisfied: scipy>=1.1.0 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.7.3) Requirement already satisfied: joblib>=0.11 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.1.0) Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2022.6.15) Requirement already satisfied: charset-normalizer<3,>=2 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2.1.0) Requirement already satisfied: idna<4,>=2.5 in /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (3.3) Installing collected packages: pyyaml, pydantic, pyarrow, psutil, isodate, filelock, rdflib Successfully installed filelock-3.8.0 isodate-0.6.1 psutil-5.9.2 pyarrow-9.0.0 pydantic-1.10.2 pyyaml-6.0 rdflib-6.2.0 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ============================= test session starts ============================== platform linux -- Python 3.7.0, pytest-7.1.2, pluggy-1.0.0 -- /opt/conda/envs/pytorch-ci/bin/python3 cachedir: .pytest_cache rootdir: /root/jenkins/workspace/dgl_PR-4648 collecting ... [Pipeline] echo Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-gpu-linux [Pipeline] timeout Timeout set to expire in 20 min [Pipeline] { [Pipeline] sh + bash tests/scripts/task_example_test.sh gpu run graph store with port 7815 collected 55 items tests/distributed/test_dist_graph_store.py::test_server_client /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`. 
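The heterograph.py:72 DGLWarning just above asks callers to build graphs through dgl.graph(data) instead of the legacy dgl.DGLGraph constructor. A minimal sketch of the recommended form (standard documented API; the edge list here is made up):

import torch
import dgl

# dgl.graph((src, dst)) is the constructor the warning recommends over
# dgl.DGLGraph(data); the edges below are illustrative only.
src = torch.tensor([0, 1, 2])
dst = torch.tensor([1, 2, 3])
g = dgl.graph((src, dst), num_nodes=4)
print(g.num_nodes(), g.num_edges())  # 4 3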
dgl_warning('Recommend creating graphs by `dgl.graph(data)`' tensor([0.0036, 0.0078, 0.0176, 0.0096, 0.0114, 0.0081, 0.0141, 0.0080, 0.0070, 0.0140, 0.0134, 0.0092, 0.0082, 0.0113, 0.0089, 0.0142, 0.0063, 0.0083, 0.0083, 0.0097, 0.0065, 0.0048, 0.0065, 0.0069, 0.0061, 0.0092, 0.0079, 0.0112, 0.0069, 0.0067, 0.0122, 0.0094, 0.0090, 0.0130, 0.0107, 0.0128, 0.0052, 0.0114, 0.0078, 0.0101, 0.0037, 0.0127, 0.0129, 0.0141, 0.0095, 0.0049, 0.0080, 0.0133, 0.0204, 0.0128, 0.0053, 0.0130, 0.0142, 0.0123, 0.0062, 0.0124, 0.0082, 0.0124, 0.0124, 0.0101, 0.0052, 0.0143, 0.0048, 0.0156, 0.0129, 0.0142, 0.0078, 0.0097, 0.0063, 0.0046, 0.0085, 0.0120, 0.0063, 0.0108, 0.0099, 0.0128, 0.0095, 0.0120, 0.0051, 0.0080, 0.0069, 0.0195, 0.0102, 0.0080, 0.0079, 0.0015, 0.0098, 0.0151, 0.0092, 0.0137, 0.0225, 0.0066, 0.0141, 0.0100, 0.0079, 0.0083, 0.0098, 0.0098, 0.0082, 0.0101]) Converting to homogeneous graph takes 0.004s, peak mem: 1.356 GB Convert a graph into a bidirected graph: 0.005 seconds, peak memory: 1.356 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.356 GB [05:28:40] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 10000 nodes and 199872 edges into 4 parts and get 56541 edge cuts Metis partitioning: 0.049 seconds, peak memory: 1.356 GB Assigning nodes to METIS partitions takes 0.055s, peak mem: 1.356 GB Save partitions: 0.004 seconds, peak memory: 1.356 GB There are 100000 edges in the graph and 0 edge cuts for 1 partitions. start client 0 start client 1 [05:28:41] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:28:41] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load dist_graph_test_2 start server 0 start graph service on server 0 for part 0 [05:28:41] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:28:41] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13571]... start client 2 [05:28:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:28:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. start client 3 [05:28:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:28:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:28:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:28:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Training with DGL built-in GraphConv module. Downloading /root/jenkins/workspace/dgl_PR-4648/cora_v2.zip from https://data.dgl.ai/dataset/cora_v2.zip... Extracting file to /root/jenkins/workspace/dgl_PR-4648/cora_v2 Finished data loading and preprocessing. NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done saving data into cached files. Training... 
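For orientation: the DGLWarning above is DGL recommending `dgl.graph(data)` over the deprecated `dgl.DGLGraph(data)` constructor, and the Cora run it precedes is the standard node-classification example built on the built-in GraphConv module. A minimal sketch of both follows; this is an illustration with assumed default hyperparameters, not the exact script the job executes, and the 200 Epoch | Loss | Accuracy lines below come from a loop of this shape.

    import dgl
    import torch
    import torch.nn.functional as F
    from dgl.nn import GraphConv

    # Recommended constructor per the warning, instead of dgl.DGLGraph(data):
    toy = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 3])))

    # The Cora citation graph downloaded above from data.dgl.ai.
    dataset = dgl.data.CoraGraphDataset()
    g = dgl.add_self_loop(dataset[0])  # GraphConv rejects 0-in-degree nodes
    feat, label = g.ndata['feat'], g.ndata['label']
    train_mask = g.ndata['train_mask']

    class GCN(torch.nn.Module):
        def __init__(self, in_feats, hidden, n_classes):
            super().__init__()
            self.conv1 = GraphConv(in_feats, hidden)
            self.conv2 = GraphConv(hidden, n_classes)

        def forward(self, g, x):
            return self.conv2(g, F.relu(self.conv1(g, x)))

    model = GCN(feat.shape[1], 16, dataset.num_classes)
    opt = torch.optim.Adam(model.parameters(), lr=0.01)
    for epoch in range(200):  # one Epoch/Loss/Accuracy log line per iteration
        logits = model(g, feat)
        loss = F.cross_entropy(logits[train_mask], label[train_mask])
        opt.zero_grad()
        loss.backward()
        opt.step()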
Epoch 00000 | Loss 1.9462 | Accuracy 0.2580 Epoch 00001 | Loss 1.9409 | Accuracy 0.3240 Epoch 00002 | Loss 1.9337 | Accuracy 0.3020 Epoch 00003 | Loss 1.9265 | Accuracy 0.3420 Epoch 00004 | Loss 1.9188 | Accuracy 0.3700 Epoch 00005 | Loss 1.9101 | Accuracy 0.4140 Epoch 00006 | Loss 1.9004 | Accuracy 0.4980 Epoch 00007 | Loss 1.8909 | Accuracy 0.5740 Epoch 00008 | Loss 1.8814 | Accuracy 0.6220 Epoch 00009 | Loss 1.8704 | Accuracy 0.6320 Epoch 00010 | Loss 1.8582 | Accuracy 0.6320 Epoch 00011 | Loss 1.8513 | Accuracy 0.6280 Epoch 00012 | Loss 1.8343 | Accuracy 0.6480 Epoch 00013 | Loss 1.8235 | Accuracy 0.6680 Epoch 00014 | Loss 1.8096 | Accuracy 0.6700 Epoch 00015 | Loss 1.7921 | Accuracy 0.6900 Epoch 00016 | Loss 1.7764 | Accuracy 0.7080 Epoch 00017 | Loss 1.7533 | Accuracy 0.7020 Epoch 00018 | Loss 1.7434 | Accuracy 0.6940 Epoch 00019 | Loss 1.7264 | Accuracy 0.6920 Epoch 00020 | Loss 1.7177 | Accuracy 0.6980 Epoch 00021 | Loss 1.6990 | Accuracy 0.7080 Epoch 00022 | Loss 1.6810 | Accuracy 0.7060 Epoch 00023 | Loss 1.6548 | Accuracy 0.7120 Epoch 00024 | Loss 1.6515 | Accuracy 0.7160 Epoch 00025 | Loss 1.6183 | Accuracy 0.7220 Epoch 00026 | Loss 1.6064 | Accuracy 0.7300 Epoch 00027 | Loss 1.5987 | Accuracy 0.7420 Epoch 00028 | Loss 1.5722 | Accuracy 0.7480 Epoch 00029 | Loss 1.5536 | Accuracy 0.7560 Epoch 00030 | Loss 1.5210 | Accuracy 0.7600 Epoch 00031 | Loss 1.5227 | Accuracy 0.7740 Epoch 00032 | Loss 1.4687 | Accuracy 0.7740 Epoch 00033 | Loss 1.4591 | Accuracy 0.7760 Epoch 00034 | Loss 1.4282 | Accuracy 0.7780 Epoch 00035 | Loss 1.3976 | Accuracy 0.7780 Epoch 00036 | Loss 1.3886 | Accuracy 0.7800 Epoch 00037 | Loss 1.3805 | Accuracy 0.7800 Epoch 00038 | Loss 1.3435 | Accuracy 0.7820 Epoch 00039 | Loss 1.3721 | Accuracy 0.7820 Epoch 00040 | Loss 1.3254 | Accuracy 0.7820 Epoch 00041 | Loss 1.2808 | Accuracy 0.7800 Epoch 00042 | Loss 1.2840 | Accuracy 0.7740 Epoch 00043 | Loss 1.2243 | Accuracy 0.7780 Epoch 00044 | Loss 1.2208 | Accuracy 0.7760 Epoch 00045 | Loss 1.2212 | Accuracy 0.7760 Epoch 00046 | Loss 1.1585 | Accuracy 0.7780 Epoch 00047 | Loss 1.1558 | Accuracy 0.7800 Epoch 00048 | Loss 1.1347 | Accuracy 0.7820 Epoch 00049 | Loss 1.1403 | Accuracy 0.7880 Epoch 00050 | Loss 1.1074 | Accuracy 0.7920 Epoch 00051 | Loss 1.0424 | Accuracy 0.7920 Epoch 00052 | Loss 1.0537 | Accuracy 0.7940 Epoch 00053 | Loss 1.0229 | Accuracy 0.7960 Epoch 00054 | Loss 1.0028 | Accuracy 0.7940 Epoch 00055 | Loss 1.0399 | Accuracy 0.7940 Epoch 00056 | Loss 0.9750 | Accuracy 0.7960 Epoch 00057 | Loss 0.9453 | Accuracy 0.7920 Epoch 00058 | Loss 0.9648 | Accuracy 0.7880 Epoch 00059 | Loss 0.9406 | Accuracy 0.7880 Epoch 00060 | Loss 0.9175 | Accuracy 0.7880 Epoch 00061 | Loss 0.9537 | Accuracy 0.7880 Epoch 00062 | Loss 0.9176 | Accuracy 0.7820 Epoch 00063 | Loss 0.8880 | Accuracy 0.7840 Epoch 00064 | Loss 0.8529 | Accuracy 0.7840 Epoch 00065 | Loss 0.8151 | Accuracy 0.7840 Epoch 00066 | Loss 0.8164 | Accuracy 0.7880 Epoch 00067 | Loss 0.8098 | Accuracy 0.7900 Epoch 00068 | Loss 0.8055 | Accuracy 0.7920 Epoch 00069 | Loss 0.7819 | Accuracy 0.7940 Epoch 00070 | Loss 0.7831 | Accuracy 0.8000 Epoch 00071 | Loss 0.7502 | Accuracy 0.7980 Epoch 00072 | Loss 0.7618 | Accuracy 0.8000 Epoch 00073 | Loss 0.7383 | Accuracy 0.7980 Epoch 00074 | Loss 0.6967 | Accuracy 0.7980 Epoch 00075 | Loss 0.7508 | Accuracy 0.7920 Epoch 00076 | Loss 0.7197 | Accuracy 0.7840 Epoch 00077 | Loss 0.7183 | Accuracy 0.7840 Epoch 00078 | Loss 0.7003 | Accuracy 0.7840 Epoch 00079 | Loss 0.7260 | Accuracy 0.7840 Epoch 00080 | Loss 0.6990 | 
Accuracy 0.7840 Epoch 00081 | Loss 0.7018 | Accuracy 0.7860 Epoch 00082 | Loss 0.6802 | Accuracy 0.7900 Epoch 00083 | Loss 0.6447 | Accuracy 0.7900 Epoch 00084 | Loss 0.6260 | Accuracy 0.7880 Epoch 00085 | Loss 0.6349 | Accuracy 0.7900 Epoch 00086 | Loss 0.6400 | Accuracy 0.7900 Epoch 00087 | Loss 0.6182 | Accuracy 0.7920 Epoch 00088 | Loss 0.5869 | Accuracy 0.7920 Epoch 00089 | Loss 0.6108 | Accuracy 0.7900 Epoch 00090 | Loss 0.6057 | Accuracy 0.7900 Epoch 00091 | Loss 0.5550 | Accuracy 0.7900 Epoch 00092 | Loss 0.5728 | Accuracy 0.7860 Epoch 00093 | Loss 0.6150 | Accuracy 0.7860 Epoch 00094 | Loss 0.5653 | Accuracy 0.7860 Epoch 00095 | Loss 0.5723 | Accuracy 0.7880 Epoch 00096 | Loss 0.5682 | Accuracy 0.7880 Epoch 00097 | Loss 0.5510 | Accuracy 0.7880 Epoch 00098 | Loss 0.5710 | Accuracy 0.7880 Epoch 00099 | Loss 0.5857 | Accuracy 0.7860 Epoch 00100 | Loss 0.5164 | Accuracy 0.7860 Epoch 00101 | Loss 0.5594 | Accuracy 0.7940 Epoch 00102 | Loss 0.5639 | Accuracy 0.7920 Epoch 00103 | Loss 0.4940 | Accuracy 0.7940 Epoch 00104 | Loss 0.5070 | Accuracy 0.7940 Epoch 00105 | Loss 0.5081 | Accuracy 0.7940 Epoch 00106 | Loss 0.5046 | Accuracy 0.7940 Epoch 00107 | Loss 0.5082 | Accuracy 0.7940 Epoch 00108 | Loss 0.4807 | Accuracy 0.7920 Epoch 00109 | Loss 0.4636 | Accuracy 0.7920 Epoch 00110 | Loss 0.5204 | Accuracy 0.7880 Epoch 00111 | Loss 0.4859 | Accuracy 0.7900 Epoch 00112 | Loss 0.4774 | Accuracy 0.7900 Epoch 00113 | Loss 0.4575 | Accuracy 0.7880 Epoch 00114 | Loss 0.4771 | Accuracy 0.7960 Epoch 00115 | Loss 0.5051 | Accuracy 0.7960 Epoch 00116 | Loss 0.4516 | Accuracy 0.7960 Epoch 00117 | Loss 0.4474 | Accuracy 0.7920 Epoch 00118 | Loss 0.4601 | Accuracy 0.7920 Epoch 00119 | Loss 0.4451 | Accuracy 0.7900 Epoch 00120 | Loss 0.4848 | Accuracy 0.7900 Epoch 00121 | Loss 0.4628 | Accuracy 0.7880 Epoch 00122 | Loss 0.4522 | Accuracy 0.7880 Epoch 00123 | Loss 0.4658 | Accuracy 0.7860 Epoch 00124 | Loss 0.4926 | Accuracy 0.7920 Epoch 00125 | Loss 0.4521 | Accuracy 0.7920 Epoch 00126 | Loss 0.4403 | Accuracy 0.7920 Epoch 00127 | Loss 0.4325 | Accuracy 0.7960 Epoch 00128 | Loss 0.4528 | Accuracy 0.7920 Epoch 00129 | Loss 0.4277 | Accuracy 0.7940 Epoch 00130 | Loss 0.4162 | Accuracy 0.7980 Epoch 00131 | Loss 0.4363 | Accuracy 0.7980 Epoch 00132 | Loss 0.4738 | Accuracy 0.7980 Epoch 00133 | Loss 0.4386 | Accuracy 0.7980 Epoch 00134 | Loss 0.4705 | Accuracy 0.7940 Epoch 00135 | Loss 0.4417 | Accuracy 0.7960 Epoch 00136 | Loss 0.3999 | Accuracy 0.7940 Epoch 00137 | Loss 0.4179 | Accuracy 0.7960 Epoch 00138 | Loss 0.4020 | Accuracy 0.7920 Epoch 00139 | Loss 0.4014 | Accuracy 0.7840 Epoch 00140 | Loss 0.4077 | Accuracy 0.7860 Epoch 00141 | Loss 0.3857 | Accuracy 0.7900 Epoch 00142 | Loss 0.4134 | Accuracy 0.7920 Epoch 00143 | Loss 0.4346 | Accuracy 0.7960 Epoch 00144 | Loss 0.4352 | Accuracy 0.7960 Epoch 00145 | Loss 0.4007 | Accuracy 0.8020 Epoch 00146 | Loss 0.4016 | Accuracy 0.7980 Epoch 00147 | Loss 0.3762 | Accuracy 0.7920 Epoch 00148 | Loss 0.3756 | Accuracy 0.7880 Epoch 00149 | Loss 0.3848 | Accuracy 0.7860 Epoch 00150 | Loss 0.4018 | Accuracy 0.7820 Epoch 00151 | Loss 0.3802 | Accuracy 0.7920 Epoch 00152 | Loss 0.4017 | Accuracy 0.7900 Epoch 00153 | Loss 0.3435 | Accuracy 0.7940 Epoch 00154 | Loss 0.3867 | Accuracy 0.7980 Epoch 00155 | Loss 0.3685 | Accuracy 0.8040 Epoch 00156 | Loss 0.4117 | Accuracy 0.8000 Epoch 00157 | Loss 0.3740 | Accuracy 0.8020 Epoch 00158 | Loss 0.4028 | Accuracy 0.7980 Epoch 00159 | Loss 0.3579 | Accuracy 0.7920 Epoch 00160 | Loss 0.3868 | Accuracy 0.7960 Epoch 00161 | Loss 
0.3549 | Accuracy 0.7940 Epoch 00162 | Loss 0.3999 | Accuracy 0.7940 Epoch 00163 | Loss 0.3725 | Accuracy 0.7940 Epoch 00164 | Loss 0.3903 | Accuracy 0.7900 Epoch 00165 | Loss 0.3571 | Accuracy 0.7940 Epoch 00166 | Loss 0.3791 | Accuracy 0.7960 Epoch 00167 | Loss 0.3439 | Accuracy 0.8020 Epoch 00168 | Loss 0.3250 | Accuracy 0.8040 Epoch 00169 | Loss 0.3488 | Accuracy 0.8020 Epoch 00170 | Loss 0.3246 | Accuracy 0.8020 Epoch 00171 | Loss 0.3719 | Accuracy 0.8020 Epoch 00172 | Loss 0.3256 | Accuracy 0.7940 Epoch 00173 | Loss 0.3185 | Accuracy 0.7940 Epoch 00174 | Loss 0.3587 | Accuracy 0.7920 Epoch 00175 | Loss 0.3587 | Accuracy 0.7900 Epoch 00176 | Loss 0.3303 | Accuracy 0.7880 Epoch 00177 | Loss 0.3268 | Accuracy 0.7880 Epoch 00178 | Loss 0.3563 | Accuracy 0.7880 Epoch 00179 | Loss 0.3828 | Accuracy 0.7920 Epoch 00180 | Loss 0.3369 | Accuracy 0.7980 Epoch 00181 | Loss 0.3357 | Accuracy 0.8020 Epoch 00182 | Loss 0.3519 | Accuracy 0.8020 Epoch 00183 | Loss 0.3699 | Accuracy 0.8040 Epoch 00184 | Loss 0.3319 | Accuracy 0.8020 Epoch 00185 | Loss 0.3564 | Accuracy 0.7980 Epoch 00186 | Loss 0.3564 | Accuracy 0.7960 Epoch 00187 | Loss 0.3439 | Accuracy 0.7900 Epoch 00188 | Loss 0.3303 | Accuracy 0.7920 Epoch 00189 | Loss 0.3270 | Accuracy 0.7940 Epoch 00190 | Loss 0.3114 | Accuracy 0.7960 Epoch 00191 | Loss 0.3439 | Accuracy 0.7960 Epoch 00192 | Loss 0.3258 | Accuracy 0.7980 Epoch 00193 | Loss 0.3129 | Accuracy 0.8000 Epoch 00194 | Loss 0.3424 | Accuracy 0.7980 Epoch 00195 | Loss 0.3218 | Accuracy 0.7940 Epoch 00196 | Loss 0.3207 | Accuracy 0.8000 Epoch 00197 | Loss 0.3627 | Accuracy 0.7980 Epoch 00198 | Loss 0.3112 | Accuracy 0.8000 Epoch 00199 | Loss 0.3258 | Accuracy 0.7960 Testing... Test accuracy 0.8190 lda/lda_model.py:29: UserWarning: cached_property not found - using property instead warnings.warn("cached_property not found - using property instead") Testing LatentDirichletAllocation ... {'prior': {'doc': 0.1, 'word': 0.1}, 'rho': 1, 'mult': {'doc': 1, 'word': 1}, 'init': {'doc': (100.0, 100.0), 'word': (100.0, 100.0)}, 'word_data': [tensor([[0.0000e+00, 0.0000e+00, 0.0000e+00, 4.5631e-05, 0.0000e+00], [0.0000e+00, 0.0000e+00, 0.0000e+00, 6.1491e-06, 0.0000e+00], [0.0000e+00, 0.0000e+00, 0.0000e+00, 9.9963e-01, 0.0000e+00], [0.0000e+00, 0.0000e+00, 0.0000e+00, 4.5618e-05, 0.0000e+00], [0.0000e+00, 0.0000e+00, 0.0000e+00, 4.5618e-05, 0.0000e+00], [0.0000e+00, 0.0000e+00, 0.0000e+00, 4.5618e-05, 0.0000e+00], [0.0000e+00, 0.0000e+00, 0.0000e+00, 4.5618e-05, 0.0000e+00], [0.0000e+00, 0.0000e+00, 0.0000e+00, 4.5618e-05, 0.0000e+00], [0.0000e+00, 0.0000e+00, 0.0000e+00, 4.5620e-05, 0.0000e+00], [0.0000e+00, 0.0000e+00, 0.0000e+00, 4.5618e-05, 0.0000e+00]])]} Testing LatentDirichletAllocation passed! [Pipeline] } [Pipeline] // timeout [Pipeline] } [Pipeline] // stage Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration... 
[WS-CLEANUP] done [Pipeline] } $ docker stop --time=1 f9b6859df1d65887fd5c1562bda6c9300bf7673dd8efba82320500053c49173b $ docker rm -f f9b6859df1d65887fd5c1562bda6c9300bf7673dd8efba82320500053c49173b [Pipeline] // withDockerContainer [Pipeline] } [Pipeline] // withEnv [Pipeline] } Running on dglci-manual-gpu-worker in /root/jenkins/workspace/dgl_PR-4648 [Pipeline] // node [Pipeline] { [Pipeline] } [Pipeline] // stage [Pipeline] } [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags Client [1104] waits on 172.17.0.3:56245 Client [1112] waits on 172.17.0.3:53595 Client [1122] waits on 172.17.0.3:40013 Client [1117] waits on 172.17.0.3:49365 Machine (0) group (0) client (0) connect to server successfully! Machine (0) group (0) client (1) connect to server successfully! Machine (0) group (0) client (2) connect to server successfully! Machine (0) group (0) client (3) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") Client[1] in group[0] is exiting... Client[2] in group[0] is exiting... Client[3] in group[0] is exiting... Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... clients have terminated Cloning repository https://github.com/dmlc/dgl.git > git init /root/jenkins/workspace/dgl_PR-4648 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 Cleaning workspace Fetching without tags > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD.
Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 Commit message: "fix for pytorch < 1.12" Cleaning workspace > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 [Pipeline] withEnv [Pipeline] { [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh Converting to homogeneous graph takes 0.003s, peak mem: 1.369 GB Save partitions: 0.007 seconds, peak memory: 1.369 GB There are 100000 edges in the graph and 0 edge cuts for 1 partitions. start client 0 + docker pull rapidsai/cugraph_nightly_torch-cuda:11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load dist_graph_test_1 start server 0 start graph service on server 0 for part 0 [05:28:52] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:28:52] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:24053]... [05:28:52] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:28:52] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10: Pulling from rapidsai/cugraph_nightly_torch-cuda Digest: sha256:72e8cb2632449beac4895f24b55018dea21f79da47fe39be05ef83a4fd3ddb67 Status: Image is up to date for rapidsai/cugraph_nightly_torch-cuda:11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10 docker.io/rapidsai/cugraph_nightly_torch-cuda:11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10 [Pipeline] } [Pipeline] // withEnv [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh + docker inspect -f . rapidsai/cugraph_nightly_torch-cuda:11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10 . 
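A note on the DGLWarning that recurs throughout this run, "Etype with 'str' format is deprecated. Please use '(str, str, str)'.": it refers to DGL's canonical edge types, where a bare string edge type should become the full (source node type, edge type, destination node type) triple. A tiny sketch with made-up node and edge types:

    import dgl
    import torch

    g = dgl.heterograph({
        # canonical etype: (src node type, edge type, dst node type)
        ('user', 'follows', 'user'): (torch.tensor([0, 1]), torch.tensor([1, 2])),
    })
    # Deprecated: g.num_edges('follows')
    # Recommended: pass the full (str, str, str) triple instead.
    print(g.num_edges(('user', 'follows', 'user')))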
[Pipeline] } [Pipeline] // withEnv [Pipeline] withDockerContainer dglci-manual-gpu-worker does not seem to be running inside a container $ docker run -t -d -u 0:0 --runtime nvidia --shm-size=8gb -w /root/jenkins/workspace/dgl_PR-4648 -v /root/jenkins/workspace/dgl_PR-4648:/root/jenkins/workspace/dgl_PR-4648:rw,z -v /root/jenkins/workspace/dgl_PR-4648@tmp:/root/jenkins/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** rapidsai/cugraph_nightly_torch-cuda:11.5-base-ubuntu18.04-py3.9-pytorch1.12.0-rapids22.10 cat $ docker top b7adb4db5f4cf1d886ff23a8dd7a1caea99af2b94c6076d915adcd91d0c69ad9 -eo pid,comm [Pipeline] { [Pipeline] stage [Pipeline] { (PyTorch Cugraph GPU Unit test) [Pipeline] sh + nvidia-smi
Tue Sep 27 05:28:53 2022
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 510.47.03    Driver Version: 510.47.03    CUDA Version: 11.6     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  Tesla T4            On   | 00000000:00:1E.0 Off |                    0 |
| N/A   44C    P8    16W /  70W |      0MiB / 15360MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
|  No running processes found                                                 |
+-----------------------------------------------------------------------------+
[Pipeline] sh + rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@b0ad4e; decorates RemoteLauncher[hudson.remoting.Channel@41a64267:dglci-manual-gpu-worker] will be ignored (a typical symptom is the Git executable not being run inside a designated container) Fetching changes from the remote Git repository Cleaning workspace Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace > git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648/.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree >
git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Client [1163] waits on 172.17.0.3:32903 Machine (0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") end Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... clients have terminated [Pipeline] sh + git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/googletest'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/libxsmm'...
Converting to homogeneous graph takes 0.011s, peak mem: 1.371 GB Save partitions: 0.029 seconds, peak memory: 1.371 GB There are 300600 edges in the graph and 0 edge cuts for 1 partitions. start client 0 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load dist_graph_test_3 [05:29:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:29:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. start server 0 start graph service on server 0 for part 0 [05:29:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:29:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:11907]... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nccl'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/phmap'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'... Client [1185] waits on 172.17.0.3:53831 Machine (0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") end Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust'... clients have terminated Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/xbyak'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'...
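The "Converting to homogeneous graph" / "Metis partitioning" / "Save partitions" / "edge cuts" messages interleaved through this run are emitted by DGL's graph-partitioning pipeline, which the distributed tests drive repeatedly. Roughly what those tests call, sketched with an illustrative toy graph and output path (both assumptions, not the test's actual inputs):

    import dgl

    # Toy stand-in for the 10,000-node test graphs partitioned above.
    g = dgl.rand_graph(10000, 100000)
    dgl.distributed.partition_graph(
        g,
        graph_name='dist_graph_test',  # the name clients later load
        num_parts=1,                   # '1 partitions' as in the log
        out_path='/tmp/partitioned',
        part_method='metis',
    )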
Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'... Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'... Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'... 
Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'... Converting to homogeneous graph takes 0.019s, peak mem: 1.390 GB Save partitions: 0.028 seconds, peak memory: 1.390 GB There are 300600 edges in the graph and 0 edge cuts for 1 partitions. start client 0 Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'... [05:29:34] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:29:34] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load dist_graph_test_3 start server 0 start graph service on server 0 for part 0 [05:29:34] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:29:34] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:20710]... Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] unstash Client [1211] waits on 172.17.0.3:42831 Machine (0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") end Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting...
clients have terminated [Pipeline] echo Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-cugraph-linux [Pipeline] timeout Timeout set to expire in 15 min [Pipeline] { [Pipeline] sh + bash tests/scripts/cugraph_unit_test.sh pytorch Collecting pytest Downloading pytest-7.1.3-py3-none-any.whl (298 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 298.2/298.2 kB 8.6 MB/s eta 0:00:00 Requirement already satisfied: psutil in /opt/conda/lib/python3.9/site-packages (5.9.2) Requirement already satisfied: pyyaml in /opt/conda/lib/python3.9/site-packages (6.0) Collecting pydantic Downloading pydantic-1.10.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (13.2 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 13.2/13.2 MB 98.0 MB/s eta 0:00:00 Requirement already satisfied: pandas in /opt/conda/lib/python3.9/site-packages (1.4.4) Collecting rdflib Downloading rdflib-6.2.0-py3-none-any.whl (500 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 500.3/500.3 kB 67.0 MB/s eta 0:00:00 Collecting ogb Downloading ogb-1.3.4-py3-none-any.whl (78 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 78.6/78.6 kB 21.3 MB/s eta 0:00:00 Collecting pluggy<2.0,>=0.12 Downloading pluggy-1.0.0-py2.py3-none-any.whl (13 kB) Collecting iniconfig Downloading iniconfig-1.1.1-py2.py3-none-any.whl (5.0 kB) Collecting tomli>=1.0.0 Downloading tomli-2.0.1-py3-none-any.whl (12 kB) Collecting attrs>=19.2.0 Downloading attrs-22.1.0-py2.py3-none-any.whl (58 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 58.8/58.8 kB 12.6 MB/s eta 0:00:00 Collecting py>=1.8.2 Downloading py-1.11.0-py2.py3-none-any.whl (98 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 98.7/98.7 kB 21.9 MB/s eta 0:00:00 Requirement already satisfied: packaging in /opt/conda/lib/python3.9/site-packages (from pytest) (21.3) Requirement already satisfied: typing-extensions>=4.1.0 in /opt/conda/lib/python3.9/site-packages (from pydantic) (4.3.0) Requirement already satisfied: python-dateutil>=2.8.1 in /opt/conda/lib/python3.9/site-packages (from pandas) (2.8.2) Requirement already satisfied: pytz>=2020.1 in /opt/conda/lib/python3.9/site-packages (from pandas) (2022.2.1) Requirement already satisfied: numpy>=1.18.5 in /opt/conda/lib/python3.9/site-packages (from pandas) (1.22.4) Requirement already satisfied: pyparsing in /opt/conda/lib/python3.9/site-packages (from rdflib) (3.0.9) Requirement already satisfied: setuptools in /opt/conda/lib/python3.9/site-packages (from rdflib) (65.3.0) Collecting isodate Downloading isodate-0.6.1-py2.py3-none-any.whl (41 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 41.7/41.7 kB 11.3 MB/s eta 0:00:00 Requirement already satisfied: urllib3>=1.24.0 in /opt/conda/lib/python3.9/site-packages (from ogb) (1.26.11) Requirement already satisfied: tqdm>=4.29.0 in /opt/conda/lib/python3.9/site-packages (from ogb) (4.64.1) Requirement already satisfied: torch>=1.6.0 in /opt/conda/lib/python3.9/site-packages (from ogb) (1.12.0) Collecting outdated>=0.2.0 Downloading outdated-0.2.1-py3-none-any.whl (7.5 kB) Collecting scikit-learn>=0.20.0 Downloading scikit_learn-1.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (30.8 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 30.8/30.8 MB 68.6 MB/s eta 0:00:00 Requirement already satisfied: six>=1.12.0 in /opt/conda/lib/python3.9/site-packages (from ogb) (1.16.0) Collecting littleutils Downloading littleutils-0.2.2.tar.gz (6.6 kB) Preparing metadata (setup.py): started Preparing metadata (setup.py): finished with status 'done' 
Requirement already satisfied: requests in /opt/conda/lib/python3.9/site-packages (from outdated>=0.2.0->ogb) (2.28.1) Requirement already satisfied: scipy>=1.3.2 in /opt/conda/lib/python3.9/site-packages (from scikit-learn>=0.20.0->ogb) (1.9.1) Collecting threadpoolctl>=2.0.0 Downloading threadpoolctl-3.1.0-py3-none-any.whl (14 kB) Requirement already satisfied: joblib>=1.0.0 in /opt/conda/lib/python3.9/site-packages (from scikit-learn>=0.20.0->ogb) (1.2.0) Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.9/site-packages (from requests->outdated>=0.2.0->ogb) (2022.9.14) Requirement already satisfied: charset-normalizer<3,>=2 in /opt/conda/lib/python3.9/site-packages (from requests->outdated>=0.2.0->ogb) (2.1.1) Requirement already satisfied: idna<4,>=2.5 in /opt/conda/lib/python3.9/site-packages (from requests->outdated>=0.2.0->ogb) (3.3) Building wheels for collected packages: littleutils Building wheel for littleutils (setup.py): started Building wheel for littleutils (setup.py): finished with status 'done' Created wheel for littleutils: filename=littleutils-0.2.2-py3-none-any.whl size=7028 sha256=e9f7281664cc0f6a5d41f79ccf3d793de57dc8c78c3d4405d9eb041e027b9f59 Stored in directory: /root/.cache/pip/wheels/04/bb/0d/2d02ec45f29c48d6192476bfb59c5a0e64b605e7212374dd15 Successfully built littleutils Installing collected packages: littleutils, iniconfig, tomli, threadpoolctl, pydantic, py, pluggy, isodate, attrs, scikit-learn, rdflib, pytest, outdated, ogb Running on dgl-manual-large-cpu in /root/jenkins/workspace/dgl_PR-4648@4 [Pipeline] { [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags Converting to homogeneous graph takes 0.005s, peak mem: 1.390 GB Save partitions: 0.008 seconds, peak memory: 1.390 GB There are 100000 edges in the graph and 0 edge cuts for 1 partitions. start client[0] for group[0] Cloning repository https://github.com/dmlc/dgl.git > git init /root/jenkins/workspace/dgl_PR-4648@4 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 [05:29:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:29:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load check_server_client_True_1_1_1 start server 0 start graph service on server 0 for part 0 [05:29:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:29:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:12813]... 
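The "start server 0" / "start client 0" / "Server is waiting for connections" chatter above comes from DGL's distributed graph store: one server process serves each partition over RPC (the "Sender/Receiver with NetType~socket" lines), and trainer clients attach through a shared IP configuration file. What the client side looks like, as a rough sketch with assumed file names; it only runs once matching servers are up:

    import dgl

    # Join the server group listed in ip_config.txt, then open the
    # partitioned graph by name, as in the 'load dist_graph_test_*' lines.
    dgl.distributed.initialize('ip_config.txt')
    g = dgl.distributed.DistGraph(
        'dist_graph_test',
        part_config='/tmp/partitioned/dist_graph_test.json',
    )
    print(g.num_nodes())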
Successfully installed attrs-22.1.0 iniconfig-1.1.1 isodate-0.6.1 littleutils-0.2.2 ogb-1.3.4 outdated-0.2.1 pluggy-1.0.0 py-1.11.0 pydantic-1.10.2 pytest-7.1.3 rdflib-6.2.0 scikit-learn-1.1.2 threadpoolctl-3.1.0 tomli-2.0.1 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ============================= test session starts ============================== platform linux -- Python 3.9.13, pytest-7.1.3, pluggy-1.0.0 -- /opt/conda/bin/python3 cachedir: .pytest_cache rootdir: /root/jenkins/workspace/dgl_PR-4648 collecting ... Cleaning workspace Fetching without tags PASSED [ 5%] tests/compute/test_data.py::test_explain_syn Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 Commit message: "fix for pytorch < 1.12" Cleaning workspace [Pipeline] withEnv [Pipeline] { [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh PASSED [ 5%] tests/compute/test_data.py::test_wiki_cs + docker pull dgllib/dgl-ci-cpu:cu101_v220629 > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 cu101_v220629: Pulling from dgllib/dgl-ci-cpu Digest: sha256:1f698bdb17257817c14ca7339a211328f6dc310f344a4a7fbf4cc7e53854427c Status: Image is up to date for dgllib/dgl-ci-cpu:cu101_v220629 docker.io/dgllib/dgl-ci-cpu:cu101_v220629 [Pipeline] } [Pipeline] // withEnv [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh + docker inspect -f . dgllib/dgl-ci-cpu:cu101_v220629 . 
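Several of the test names interleaved above (test_as_nodepred1, test_as_linkpred, test_as_graphpred) exercise DGL's dataset adapters, which re-split an arbitrary dataset for a given prediction task. A sketch of the node-prediction adapter with an assumed split ratio, for illustration only:

    import dgl

    # Wrap a dataset so it exposes standard train/val/test node masks.
    dataset = dgl.data.AsNodePredDataset(
        dgl.data.CoraGraphDataset(), split_ratio=[0.8, 0.1, 0.1])
    g = dataset[0]
    print(g.ndata['train_mask'].sum())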
[Pipeline] } [Pipeline] // withEnv [Pipeline] withDockerContainer dgl-manual-large-cpu does not seem to be running inside a container $ docker run -t -d -u 0:0 -w /root/jenkins/workspace/dgl_PR-4648@4 -v /root/jenkins/workspace/dgl_PR-4648@4:/root/jenkins/workspace/dgl_PR-4648@4:rw,z -v /root/jenkins/workspace/dgl_PR-4648@4@tmp:/root/jenkins/workspace/dgl_PR-4648@4@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-cpu:cu101_v220629 cat $ docker top 6db97419e6b6b2f1427e0e55271c5f8554e9cd989f9b44da6a3861a1be7f5a89 -eo pid,comm [Pipeline] { [Pipeline] stage [Pipeline] { (MXNet CPU Unit test) [Pipeline] sh + rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@45e8259d; decorates RemoteLauncher[hudson.remoting.Channel@1d9b9638:dgl-manual-large-cpu] will be ignored (a typical symptom is the Git executable not being run inside a designated container) Fetching changes from the remote Git repository Cleaning workspace > git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648@4/.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Client [1229] waits on 172.17.0.3:49273 Machine (0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") end Client[0] in group[0] is exiting...
Commit message: "fix for pytorch < 1.12" Cleaning workspace > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Server (0) shutdown. Server is exiting... clients have terminated [Pipeline] sh + git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' PASSED [ 5%] tests/compute/test_data.py::test_explain_syn PASSED [ 5%] tests/compute/test_data.py::test_wiki_cs Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/METIS'... PASSED [ 5%] tests/compute/test_data.py::test_yelp SKIPPED (Dataset too large to ...) [ 5%] tests/compute/test_data.py::test_flickr PASSED [ 5%] tests/compute/test_data.py::test_yelp SKIPPED (Dataset too large to ...) [ 5%] tests/compute/test_data.py::test_flickr Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/dmlc-core'... collected 3 items tests/cugraph/test_basics.py::test_dummy PASSED [ 33%] tests/cugraph/test_basics.py::test_to_cugraph_conversion Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/googletest'... 
PASSED [ 5%] tests/compute/test_data.py::test_extract_archive PASSED [ 5%] tests/compute/test_data.py::test_csvdataset Converting to homogeneous graph takes 0.003s, peak mem: 1.390 GB Save partitions: 0.005 seconds, peak memory: 1.390 GB There are 100000 edges in the graph and 0 edge cuts for 1 partitions. start client[0] for group[0] PASSED [ 5%] tests/compute/test_data.py::test_add_nodepred_split PASSED [ 5%] tests/compute/test_data.py::test_extract_archive PASSED [ 5%] tests/compute/test_data.py::test_csvdataset Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/libxsmm'... [05:29:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:29:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load check_server_client_False_1_1_1 start server 0 start graph service on server 0 for part 0 [05:29:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:29:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:10759]... PASSED [ 5%] tests/compute/test_data.py::test_add_nodepred_split PASSED [ 5%] tests/compute/test_data.py::test_as_nodepred1 PASSED [ 5%] tests/compute/test_data.py::test_as_nodepred2 PASSED [ 5%] tests/compute/test_data.py::test_as_nodepred_ogb PASSED [ 5%] tests/compute/test_data.py::test_as_nodepred1 PASSED [ 5%] tests/compute/test_data.py::test_as_nodepred2 PASSED [ 5%] tests/compute/test_data.py::test_as_nodepred_ogb SKIPPED (ogb only s...) [ 5%] tests/compute/test_data.py::test_as_linkpred PASSED [ 5%] tests/compute/test_data.py::test_as_linkpred_ogb SKIPPED (ogb only s...) [ 5%] tests/compute/test_data.py::test_as_nodepred_csvdataset PASSED [ 5%] tests/compute/test_data.py::test_as_graphpred Client [1247] waits on 172.17.0.3:52321 Machine (0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") end Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... clients have terminated PASSED [ 1%] tests/distributed/test_dist_graph_store.py::test_dist_emb_server_client PASSED [ 66%] tests/cugraph/test_basics.py::test_from_cugraph_conversion PASSED [100%] =============================== warnings summary =============================== ../../../../opt/conda/lib/python3.9/site-packages/dask_cudf/core.py:33 /opt/conda/lib/python3.9/site-packages/dask_cudf/core.py:33: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. DASK_VERSION = LooseVersion(dask.__version__) ../../../../opt/conda/lib/python3.9/site-packages/setuptools/_distutils/version.py:346: 34 warnings /opt/conda/lib/python3.9/site-packages/setuptools/_distutils/version.py:346: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
other = LooseVersion(other) python/dgl/backend/backend.py:1717 /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/backend.py:1717: DeprecationWarning: invalid escape sequence \P """Segment reduction operator. python/dgl/backend/pytorch/tensor.py:16 python/dgl/backend/pytorch/tensor.py:16 /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:16: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(th.__version__) < LooseVersion("1.9.0"): python/dgl/backend/pytorch/tensor.py:340 python/dgl/backend/pytorch/tensor.py:340 /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:340: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(th.__version__) >= LooseVersion("1.10.0"): python/dgl/dataloading/dataloader.py:33 /root/jenkins/workspace/dgl_PR-4648/python/dgl/dataloading/dataloader.py:33: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. PYTORCH_VER = LooseVersion(torch.__version__) python/dgl/_dataloading/pytorch/dataloader.py:23 /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:23: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. PYTORCH_VER = LooseVersion(th.__version__) python/dgl/_dataloading/pytorch/dataloader.py:24 /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:24: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. PYTORCH_16 = PYTORCH_VER >= LooseVersion("1.6.0") python/dgl/_dataloading/pytorch/dataloader.py:25 /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:25: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. PYTORCH_17 = PYTORCH_VER >= LooseVersion("1.7.0") -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html -- generated xml file: /root/jenkins/workspace/dgl_PR-4648/pytest_cugraph.xml -- ============================= slowest 20 durations ============================= 12.17s call tests/cugraph/test_basics.py::test_to_cugraph_conversion 0.05s call tests/cugraph/test_basics.py::test_from_cugraph_conversion (7 durations < 0.005s hidden. Use -vv to show these durations.) ======================= 3 passed, 44 warnings in 19.04s ======================== [Pipeline] } [Pipeline] // timeout [Pipeline] } [Pipeline] // stage Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration... 
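The warnings summary above is dominated by one message: distutils' LooseVersion is deprecated in favor of packaging.version, and the flagged call sites in dgl are PyTorch version gates, likely the kind of check this PR's commit ("fix for pytorch < 1.12") is about. The replacement the warning points to, as a sketch:

    import torch as th
    from packaging.version import Version  # successor to distutils' LooseVersion

    # Drop a local build tag such as '+cpu' before comparing, since PEP 440
    # treats '1.9.0+cpu' as greater than '1.9.0'.
    PYTORCH_VER = Version(th.__version__.split('+')[0])
    if PYTORCH_VER < Version('1.12.0'):
        pass  # take the pre-1.12 fallback path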
[WS-CLEANUP] done [Pipeline] } $ docker stop --time=1 b7adb4db5f4cf1d886ff23a8dd7a1caea99af2b94c6076d915adcd91d0c69ad9 $ docker rm -f b7adb4db5f4cf1d886ff23a8dd7a1caea99af2b94c6076d915adcd91d0c69ad9 [Pipeline] // withDockerContainer [Pipeline] } [Pipeline] // withEnv [Pipeline] } Running on dglci-manual-gpu-worker in /root/jenkins/workspace/dgl_PR-4648 [Pipeline] // node [Pipeline] { PASSED [ 5%] tests/compute/test_data.py::test_as_linkpred PASSED [ 5%] tests/compute/test_data.py::test_as_linkpred_ogb [Pipeline] } [Pipeline] // stage [Pipeline] } [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags Converting to homogeneous graph takes 0.003s, peak mem: 1.390 GB Save partitions: 0.008 seconds, peak memory: 1.390 GB There are 100000 edges in the graph and 0 edge cuts for 1 partitions. start client[0] for group[0] Cloning repository https://github.com/dmlc/dgl.git > git init /root/jenkins/workspace/dgl_PR-4648 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load check_dist_emb_True_1_1_1 start server 0 start graph service on server 0 for part 0 [05:30:11] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:30:11] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18465]... [05:30:11] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:30:11] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Cleaning workspace Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. 
Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace [Pipeline] withEnv [Pipeline] { [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 + docker pull dgllib/dgl-ci-gpu:cu101_v220816 cu101_v220816: Pulling from dgllib/dgl-ci-gpu Digest: sha256:ca40fc52876a2563a4e904d0c271d658c1acc8e6a4f8611b578bb49f8c7fd925 Status: Image is up to date for dgllib/dgl-ci-gpu:cu101_v220816 docker.io/dgllib/dgl-ci-gpu:cu101_v220816 [Pipeline] } [Pipeline] // withEnv [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh + docker inspect -f . dgllib/dgl-ci-gpu:cu101_v220816 . [Pipeline] } [Pipeline] // withEnv [Pipeline] withDockerContainer dglci-manual-gpu-worker does not seem to be running inside a container $ docker run -t -d -u 0:0 --runtime nvidia -w /root/jenkins/workspace/dgl_PR-4648 -v /root/jenkins/workspace/dgl_PR-4648:/root/jenkins/workspace/dgl_PR-4648:rw,z -v /root/jenkins/workspace/dgl_PR-4648@tmp:/root/jenkins/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-gpu:cu101_v220816 cat Client [1265] waits on 172.17.0.3:51429 Machine (0) group (0) client (0) connect to server successfuly! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") Client[0] in group[0] is exiting... 
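The warnings summary in the cugraph session above flags every remaining distutils LooseVersion comparison (python/dgl/backend/pytorch/tensor.py, python/dgl/dataloading/dataloader.py, and the _dataloading variants) with "Use packaging.version instead." A minimal sketch of that suggested migration, assuming the third-party packaging library; whether DGL adopts it in these files is not shown in this log:

    # Hedged sketch: packaging.version as the replacement the
    # DeprecationWarnings above suggest for distutils' LooseVersion.
    from packaging import version

    import torch as th

    # Equivalent of `LooseVersion(th.__version__) < LooseVersion("1.9.0")`
    # from python/dgl/backend/pytorch/tensor.py:16; version.parse() also
    # handles local version suffixes such as "1.12.1+cu113".
    PYTORCH_VER = version.parse(th.__version__)
    needs_old_segment_path = PYTORCH_VER < version.parse("1.9.0")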
$ docker top 93431508da8c7bd1ffa88cc07c573b36fd6622338dac6808937ce4f0b46901f0 -eo pid,comm [Pipeline] { [Pipeline] stage [Pipeline] { (MXNet GPU Unit test) [Pipeline] sh + nvidia-smi Tue Sep 27 05:30:16 2022 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 510.47.03 Driver Version: 510.47.03 CUDA Version: 11.6 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla T4 On | 00000000:00:1E.0 Off | 0 | | N/A 42C P8 16W / 70W | 0MiB / 15360MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=============================================================================| | No running processes found | +-----------------------------------------------------------------------------+ [Pipeline] sh Server (0) shutdown. Server is exiting... + rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials clients have terminated [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@276ec34c; decorates RemoteLauncher[hudson.remoting.Channel@41a64267:dglci-manual-gpu-worker] will be ignored (a typical symptom is the Git executable not being run inside a designated container) Fetching changes from the remote Git repository Cleaning workspace Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace > git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648/.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v 
--no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 [Pipeline] sh + git submodule update --recursive --init Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/dmlc-core'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/googletest'... Converting to homogeneous graph takes 0.003s, peak mem: 1.390 GB Save partitions: 0.008 seconds, peak memory: 1.390 GB There are 100000 edges in the graph and 0 edge cuts for 1 partitions. start client[0] for group[0] Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/libxsmm'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/nccl'... /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load check_dist_emb_False_1_1_1 start server 0 start graph service on server 0 for part 0 [05:30:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:30:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:11787]... [05:30:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:30:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/phmap'... PASSED [ 5%] tests/compute/test_data.py::test_as_nodepred_csvdataset PASSED [ 5%] tests/compute/test_data.py::test_as_graphpred Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/tensorpipe'... 
Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/thrust'... Client [1284] waits on 172.17.0.3:34707 Machine (0) group (0) client (0) connect to server successfuly! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") Client[0] in group[0] is exiting... Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/tvm'... Server (0) shutdown. Server is exiting... clients have terminated PASSED [ 3%] tests/distributed/test_dist_graph_store.py::test_standalone Converting to homogeneous graph takes 0.003s, peak mem: 1.390 GB Save partitions: 0.005 seconds, peak memory: 1.390 GB There are 100000 edges in the graph and 0 edge cuts for 1 partitions. end Client[-1] in group[-1] is exiting... PASSED [ 5%] tests/distributed/test_dist_graph_store.py::test_standalone_node_emb Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/xbyak'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/METIS/GKlib'... Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/tensorpipe/third_party/googletest'... Converting to homogeneous graph takes 0.003s, peak mem: 1.390 GB Save partitions: 0.005 seconds, peak memory: 1.390 GB There are 100000 edges in the graph and 0 edge cuts for 1 partitions. Client[-1] in group[-1] is exiting... PASSED [ 7%] tests/distributed/test_dist_graph_store.py::test_split[True] Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/tensorpipe/third_party/libnop'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/tensorpipe/third_party/libuv'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/tensorpipe/third_party/pybind11'... 
Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/nccl'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/phmap'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust'... Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/tensorpipe/third_party/pybind11/tools/clang'... Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/thrust/dependencies/cub'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm'... Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/tvm/3rdparty/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/tvm/3rdparty/dmlc-core'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/tvm/3rdparty/rang'... PASSED [ 5%] tests/compute/test_data.py::test_as_graphpred_reprocess Cloning into '/root/jenkins/workspace/dgl_PR-4648@4/third_party/tvm/3rdparty/vta-hw'... 
Converting to homogeneous graph takes 0.008s, peak mem: 1.451 GB Convert a graph into a bidirected graph: 0.016 seconds, peak memory: 1.451 GB Construct multi-constraint weights: 0.001 seconds, peak memory: 1.451 GB [05:30:43] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 30030 nodes and 601200 edges into 4 parts and get 175264 edge cuts Metis partitioning: 0.187 seconds, peak memory: 1.451 GB Assigning nodes to METIS partitions takes 0.205s, peak mem: 1.451 GB Reshuffle nodes and edges: 0.020 seconds Split the graph: 0.069 seconds Construct subgraphs: 0.077 seconds Splitting the graph into partitions takes 0.166s, peak mem: 1.451 GB part 0 has 10000 nodes of type n1 and 2574 are inside the partition part 0 has 9873 nodes of type n2 and 2576 are inside the partition part 0 has 9901 nodes of type n3 and 2580 are inside the partition part 0 has 92426 edges of type r1 and 25798 are inside the partition part 0 has 40959 edges of type r2 and 25834 are inside the partition part 0 has 40713 edges of type r3 and 25619 are inside the partition part 1 has 10000 nodes of type n1 and 2574 are inside the partition part 1 has 9904 nodes of type n2 and 2576 are inside the partition part 1 has 9906 nodes of type n3 and 2580 are inside the partition part 1 has 92768 edges of type r1 and 25800 are inside the partition part 1 has 40705 edges of type r2 and 25798 are inside the partition part 1 has 41105 edges of type r3 and 25978 are inside the partition part 2 has 9999 nodes of type n1 and 2281 are inside the partition part 2 has 9794 nodes of type n2 and 2281 are inside the partition part 2 has 9801 nodes of type n3 and 2280 are inside the partition part 2 has 89496 edges of type r1 and 22774 are inside the partition part 2 has 36244 edges of type r2 and 22739 are inside the partition part 2 has 36291 edges of type r3 and 22826 are inside the partition part 3 has 10000 nodes of type n1 and 2571 are inside the partition part 3 has 9883 nodes of type n2 and 2577 are inside the partition part 3 has 9906 nodes of type n3 and 2580 are inside the partition part 3 has 92623 edges of type r1 and 25728 are inside the partition part 3 has 40948 edges of type r2 and 25829 are inside the partition part 3 has 40613 edges of type r3 and 25877 are inside the partition Save partitions: 0.043 seconds, peak memory: 1.451 GB There are 300600 edges in the graph and 0 edge cuts for 4 partitions. 
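The block above ("Metis partitioning", edge cuts, per-part node and edge counts) is the standard report from DGL's graph partitioning step, which the distributed tests run during setup. A minimal sketch of the kind of call that emits such a report, on a toy graph; the graph, graph_name, and out_path are invented for illustration and are not the tests' actual inputs:

    # Hedged sketch: METIS partitioning of a toy graph, the step whose
    # report appears above. All names and sizes here are made up.
    import dgl
    import dgl.distributed

    g = dgl.rand_graph(1000, 10000)       # small homogeneous stand-in graph
    dgl.distributed.partition_graph(
        g,
        graph_name="toy",                 # hypothetical dataset name
        num_parts=4,                      # matches the 4-part runs in this log
        out_path="/tmp/toy_parts",        # hypothetical output directory
        part_method="metis",              # the partitioner reported above
    )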
Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] unstash [Pipeline] echo Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-cpu-linux [Pipeline] timeout Timeout set to expire in 30 min [Pipeline] { PASSED [ 9%] tests/distributed/test_dist_graph_store.py::test_split[False] [Pipeline] sh + bash tests/scripts/task_unit_test.sh mxnet cpu Requirement already satisfied: pytest in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (7.0.1) Collecting psutil Downloading psutil-5.9.2-cp36-cp36m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (280 kB) Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/xbyak'... Collecting pyyaml Downloading PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (603 kB) Collecting pydantic Downloading pydantic-1.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (11.2 MB) Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/METIS/GKlib'... Collecting pandas Using cached pandas-1.1.5-cp36-cp36m-manylinux1_x86_64.whl (9.5 MB) Collecting rdflib Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Downloading rdflib-5.0.0-py3-none-any.whl (231 kB) Collecting ogb Downloading ogb-1.3.4-py3-none-any.whl (78 kB) Requirement already satisfied: py>=1.8.2 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from pytest) (1.11.0) Requirement already satisfied: iniconfig in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from pytest) (1.1.1) Requirement already satisfied: importlib-metadata>=0.12 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from pytest) (4.8.3) Requirement already satisfied: pluggy<2.0,>=0.12 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from pytest) (1.0.0) Requirement already satisfied: packaging in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from pytest) (21.3) Requirement already satisfied: attrs>=19.2.0 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from pytest) (21.4.0) Requirement already satisfied: tomli>=1.0.0 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from pytest) (1.2.3) Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from pydantic) (4.1.1) Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' Collecting dataclasses>=0.6 Using cached dataclasses-0.8-py3-none-any.whl (19 kB) Collecting pytz>=2017.2 Downloading pytz-2022.2.1-py2.py3-none-any.whl (500 kB) Converting to homogeneous 
graph takes 0.002s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.004 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:30:47] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 10000 nodes and 199872 edges into 4 parts and get 56578 edge cuts Metis partitioning: 0.046 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.051s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.005 seconds Split the graph: 0.024 seconds Construct subgraphs: 0.034 seconds Splitting the graph into partitions takes 0.063s, peak mem: 1.529 GB part 0 has 10000 nodes and 2500 are inside the partition part 0 has 91023 edges and 24712 are inside the partition part 1 has 10000 nodes and 2500 are inside the partition part 1 has 91347 edges and 25164 are inside the partition part 2 has 10000 nodes and 2500 are inside the partition part 2 has 91578 edges and 25151 are inside the partition part 3 has 10000 nodes and 2500 are inside the partition part 3 has 91266 edges and 24973 are inside the partition Save partitions: 0.015 seconds, peak memory: 1.529 GB There are 100000 edges in the graph and 0 edge cuts for 4 partitions. Requirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from pandas) (2.8.2) Requirement already satisfied: numpy>=1.15.4 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from pandas) (1.19.5) Requirement already satisfied: pyparsing in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from rdflib) (3.0.9) Requirement already satisfied: six in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from rdflib) (1.16.0) Collecting isodate Downloading isodate-0.6.1-py2.py3-none-any.whl (41 kB) Requirement already satisfied: urllib3>=1.24.0 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from ogb) (1.26.9) Requirement already satisfied: tqdm>=4.29.0 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from ogb) (4.64.0) Collecting torch>=1.6.0 Downloading torch-1.10.2-cp36-cp36m-manylinux1_x86_64.whl (881.9 MB) PASSED [ 10%] tests/distributed/test_dist_graph_store.py::test_split_even Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/googletest'... PASSED [ 5%] tests/compute/test_data.py::test_as_graphpred_reprocess PASSED [ 5%] tests/compute/test_data.py::test_as_graphpred_ogb SKIPPED (ogb only ...) 
[ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[idtype0] PASSED [ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[idtype1] PASSED [ 5%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax_unidirectional SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype0-src-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype0-dst-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype1-src-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype1-dst-g0] SKIPPED [ 6%] tests/compute/test_ffi.py::test_cython PASSED [ 6%] tests/compute/test_ffi.py::test_callback[1] PASSED [ 6%] tests/compute/test_ffi.py::test_callback[2.3] PASSED [ 6%] tests/compute/test_ffi.py::test_callback_thread[1] PASSED [ 6%] tests/compute/test_ffi.py::test_callback_thread[2.3] PASSED [ 6%] tests/compute/test_filter.py::test_graph_filter PASSED [ 6%] tests/compute/test_filter.py::test_array_filter[idtype0] SKIPPED (CP...) [ 6%] tests/compute/test_filter.py::test_array_filter[idtype1] SKIPPED (CP...) [ 6%] tests/compute/test_frame.py::test_column_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_plain PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_dtype PASSED [ 6%] tests/compute/test_generators.py::test_rand_graph PASSED [ 6%] tests/compute/test_graph.py::test_query Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libnop'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/libuv'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11'... Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tensorpipe/third_party/pybind11/tools/clang'... 
Converting to homogeneous graph takes 0.003s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.005 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:30:53] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 10000 nodes and 199872 edges into 4 parts and get 56461 edge cuts Metis partitioning: 0.049 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.054s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.005 seconds Split the graph: 0.028 seconds Construct subgraphs: 0.038 seconds Splitting the graph into partitions takes 0.072s, peak mem: 1.529 GB part 0 has 10000 nodes and 2501 are inside the partition part 0 has 91468 edges and 25337 are inside the partition part 1 has 10000 nodes and 2499 are inside the partition part 1 has 91662 edges and 24912 are inside the partition part 2 has 9999 nodes and 2500 are inside the partition part 2 has 91219 edges and 24847 are inside the partition part 3 has 9999 nodes and 2500 are inside the partition part 3 has 90856 edges and 24904 are inside the partition Save partitions: 0.023 seconds, peak memory: 1.529 GB There are 100000 edges in the graph and 0 edge cuts for 4 partitions. part 0 get 1706 nodes and 1706 are in the partition intersection has 1706 part 0 get 17268 edges and 17268 are in the partition intersection has 17268 part 1 get 1706 nodes and 1689 are in the partition intersection has 1706 part 1 get 17267 edges and 16989 are in the partition intersection has 17267 part 2 get 1705 nodes and 1705 are in the partition intersection has 1705 part 2 get 17267 edges and 16964 are in the partition intersection has 17267 part 3 get 1705 nodes and 1703 are in the partition intersection has 1705 part 3 get 17267 edges and 17070 are in the partition intersection has 17267 PASSED [ 12%] tests/distributed/test_distributed_sampling.py::test_rpc_find_edges_shuffle[1] Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.002 seconds, peak memory: 1.529 GB There are 3060 edges in the graph and 0 edge cuts for 1 partitions. Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/thrust/dependencies/cub'... /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_find_edges start graph service on server 0 for part 0 [05:30:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:30:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:19883]... [05:30:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:30:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
PASSED [ 6%] tests/compute/test_graph.py::test_mutation PASSED [ 6%] tests/compute/test_graph.py::test_scipy_adjmat PASSED [ 6%] tests/compute/test_graph.py::test_incmat PASSED [ 6%] tests/compute/test_graph.py::test_find_edges PASSED [ 6%] tests/compute/test_graph.py::test_ismultigraph PASSED [ 6%] tests/compute/test_graph.py::test_hypersparse_query PASSED [ 6%] tests/compute/test_graph.py::test_empty_data_initialized PASSED [ 7%] tests/compute/test_graph.py::test_is_sorted PASSED [ 7%] tests/compute/test_graph.py::test_default_types PASSED [ 7%] tests/compute/test_graph.py::test_formats PASSED [ 7%] tests/compute/test_heterograph.py::test_create[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_create[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_create2 PASSED [ 7%] tests/compute/test_heterograph.py::test_query[idtype0] Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/dmlc-core'... Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/rang'... PASSED [ 5%] tests/compute/test_data.py::test_as_graphpred_ogb Cloning into '/root/jenkins/workspace/dgl_PR-4648/third_party/tvm/3rdparty/vta-hw'... Collecting scikit-learn>=0.20.0 Using cached scikit_learn-0.24.2-cp36-cp36m-manylinux2010_x86_64.whl (22.2 MB) Collecting outdated>=0.2.0 Using cached outdated-0.2.1-py3-none-any.whl (7.5 kB) Requirement already satisfied: zipp>=0.5 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from importlib-metadata>=0.12->pytest) (3.6.0) Collecting littleutils Using cached littleutils-0.2.2-py3-none-any.whl Requirement already satisfied: requests in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from outdated>=0.2.0->ogb) (2.27.1) Collecting threadpoolctl>=2.0.0 Using cached threadpoolctl-3.1.0-py3-none-any.whl (14 kB) Requirement already satisfied: scipy>=0.19.1 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from scikit-learn>=0.20.0->ogb) (1.5.4) Requirement already satisfied: joblib>=0.11 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from scikit-learn>=0.20.0->ogb) (1.1.0) Requirement already satisfied: importlib-resources in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from tqdm>=4.29.0->ogb) (5.4.0) Requirement already satisfied: idna<4,>=2.5 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from requests->outdated>=0.2.0->ogb) (3.3) Requirement already satisfied: charset-normalizer~=2.0.0 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from requests->outdated>=0.2.0->ogb) (2.0.12) Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/envs/mxnet-ci/lib/python3.6/site-packages (from requests->outdated>=0.2.0->ogb) (2021.5.30) Client [1090] waits on 172.17.0.3:45449 Machine (0) group (0) client (0) connect to server successfuly! 
Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Downloading /root/jenkins/workspace/dgl_PR-4648/cora_v2.zip from https://data.dgl.ai/dataset/cora_v2.zip... Extracting file to /root/jenkins/workspace/dgl_PR-4648/cora_v2 Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] unstash Installing collected packages: threadpoolctl, pytz, littleutils, dataclasses, torch, scikit-learn, pandas, outdated, isodate, rdflib, pyyaml, pydantic, psutil, ogb Finished data loading and preprocessing. NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done saving data into cached files. Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.029 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 1 partitions. /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_find_edges start graph service on server 0 for part 0 [05:30:58] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:30:58] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13648]... [05:30:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:30:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 7%] tests/compute/test_heterograph.py::test_query[idtype1] Client [1090] waits on 172.17.0.3:60937 Machine (0) group (0) client (0) connect to server successfuly! Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... 
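The DGLWarning repeated throughout this run, "Etype with 'str' format is deprecated. Please use '(str, str, str)'." (raised from graph_partition_book.py:733), is about how edge types are spelled. A small self-contained illustration of the two spellings on an invented heterograph; the warning itself originates in dgl.distributed internals, not in this snippet:

    # Hedged illustration of the two etype spellings behind the repeated
    # DGLWarning: bare edge-type name vs. canonical triplet.
    import dgl
    import torch as th

    g = dgl.heterograph({
        ("user", "follows", "user"): (th.tensor([0, 1]), th.tensor([1, 2])),
    })

    n_bare = g.num_edges("follows")                         # deprecated 'str' form
    n_canonical = g.num_edges(("user", "follows", "user"))  # preferred triplet
    assert n_bare == n_canonical == 2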
PASSED [ 14%] tests/distributed/test_distributed_sampling.py::test_rpc_find_edges_shuffle[2] Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:31:02] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 3030 nodes and 6120 edges into 2 parts and get 204 edge cuts Metis partitioning: 0.003 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.002 seconds Splitting the graph into partitions takes 0.005s, peak mem: 1.529 GB part 0 has 587 nodes of type n1 and 519 are inside the partition part 0 has 574 nodes of type n2 and 511 are inside the partition part 0 has 585 nodes of type n3 and 525 are inside the partition part 0 has 565 edges of type r12 and 531 are inside the partition part 0 has 571 edges of type r13 and 535 are inside the partition part 0 has 554 edges of type r23 and 527 are inside the partition part 1 has 556 nodes of type n1 and 491 are inside the partition part 1 has 549 nodes of type n2 and 489 are inside the partition part 1 has 559 nodes of type n3 and 495 are inside the partition part 1 has 515 edges of type r12 and 479 are inside the partition part 1 has 530 edges of type r13 and 495 are inside the partition part 1 has 529 edges of type r23 and 493 are inside the partition Save partitions: 0.003 seconds, peak memory: 1.529 GB There are 3060 edges in the graph and 0 edge cuts for 2 partitions. [Pipeline] echo Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-gpu-linux [Pipeline] timeout Timeout set to expire in 30 min [Pipeline] { [Pipeline] sh + bash tests/scripts/task_unit_test.sh mxnet gpu /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_find_edges start graph service on server 0 for part 0 [05:31:03] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:03] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:10819]... 
Requirement already satisfied: pytest in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (7.1.2) Collecting psutil PASSED [ 7%] tests/compute/test_heterograph.py::test_empty_query[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_empty_query[idtype1] Downloading psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (281 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 281.3/281.3 kB 10.0 MB/s eta 0:00:00 Collecting pyyaml Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 596.3/596.3 kB 40.6 MB/s eta 0:00:00 PASSED [ 7%] tests/compute/test_heterograph.py::test_adj[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_adj[idtype1] Collecting pydantic Downloading pydantic-1.10.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (11.8 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 11.8/11.8 MB 115.3 MB/s eta 0:00:00 PASSED [ 7%] tests/compute/test_heterograph.py::test_inc[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_inc[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_view[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_view[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_view1[idtype0] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_find_edges start graph service on server 1 for part 1 [05:31:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:10821]... Collecting pandas Using cached pandas-1.1.5-cp37-cp37m-manylinux1_x86_64.whl (9.5 MB) Collecting rdflib [05:31:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
Downloading rdflib-6.2.0-py3-none-any.whl (500 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 500.3/500.3 kB 68.7 MB/s eta 0:00:00 Collecting ogb Downloading ogb-1.3.4-py3-none-any.whl (78 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 78.6/78.6 kB 16.1 MB/s eta 0:00:00 Requirement already satisfied: importlib-metadata>=0.12 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from pytest) (4.12.0) Requirement already satisfied: packaging in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from pytest) (21.3) Requirement already satisfied: pluggy<2.0,>=0.12 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from pytest) (1.0.0) Requirement already satisfied: tomli>=1.0.0 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from pytest) (2.0.1) Requirement already satisfied: attrs>=19.2.0 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from pytest) (22.1.0) Requirement already satisfied: iniconfig in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from pytest) (1.1.1) Requirement already satisfied: py>=1.8.2 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from pytest) (1.11.0) Requirement already satisfied: typing-extensions>=4.1.0 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from pydantic) (4.3.0) Requirement already satisfied: numpy>=1.15.4 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from pandas) (1.21.6) Collecting pytz>=2017.2 Using cached pytz-2022.2.1-py2.py3-none-any.whl (500 kB) Requirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from pandas) (2.8.2) Requirement already satisfied: pyparsing in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from rdflib) (3.0.9) Requirement already satisfied: setuptools in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from rdflib) (61.2.0) Collecting isodate Downloading isodate-0.6.1-py2.py3-none-any.whl (41 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 41.7/41.7 kB 9.7 MB/s eta 0:00:00 Requirement already satisfied: urllib3>=1.24.0 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from ogb) (1.26.11) Collecting scikit-learn>=0.20.0 Using cached scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (24.8 MB) Collecting outdated>=0.2.0 Using cached outdated-0.2.1-py3-none-any.whl (7.5 kB) Collecting torch>=1.6.0 Downloading torch-1.12.1-cp37-cp37m-manylinux1_x86_64.whl (776.3 MB) PASSED [ 7%] tests/compute/test_heterograph.py::test_view1[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_flatten[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_flatten[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device[idtype0] SKIPPED (...) [ 7%] tests/compute/test_heterograph.py::test_to_device[idtype1] SKIPPED (...) 
[ 7%] tests/compute/test_heterograph.py::test_to_device2[g0-idtype0] SKIPPED [ 7%] tests/compute/test_heterograph.py::test_to_device2[g0-idtype1] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_to_device2[g1-idtype0] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_to_device2[g1-idtype1] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_pin_memory_[idtype0] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_pin_memory_[idtype1] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_convert_bound[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert_bound[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo_zero_nodes[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo_zero_nodes[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo2[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo2[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_invertible_conversion[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_invertible_conversion[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_metagraph_reachable[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_metagraph_reachable[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph_mask[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph_mask[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph[idtype1] Client [1090] waits on 172.17.0.3:36223 Machine (0) group (0) client (0) connect to server successfuly! Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:31:07] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 2 parts and get 260 edge cuts Metis partitioning: 0.002 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.003 seconds Splitting the graph into partitions takes 0.007s, peak mem: 1.529 GB part 0 has 1512 nodes and 1354 are inside the partition part 0 has 5630 edges and 5370 are inside the partition part 1 has 1552 nodes and 1354 are inside the partition part 1 has 5446 edges and 5186 are inside the partition Save partitions: 0.032 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 2 partitions. PASSED [ 8%] tests/compute/test_heterograph.py::test_apply[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_apply[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_level2[idtype0] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. 
Please use '(str, str, str)'.") load test_find_edges start graph service on server 0 for part 0 [05:31:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18024]... /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_find_edges start graph service on server 1 for part 1 [05:31:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18026]... [05:31:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 8%] tests/compute/test_heterograph.py::test_level2[idtype1] ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 776.3/776.3 MB 4.2 MB/s eta 0:00:00 PASSED [ 8%] tests/compute/test_heterograph.py::test_more_nnz[idtype0] SKIPPED (N...) [ 9%] tests/compute/test_heterograph.py::test_more_nnz[idtype1] SKIPPED (N...) [ 9%] tests/compute/test_heterograph.py::test_updates[idtype0] Client [1090] waits on 172.17.0.3:42195 Machine (0) group (0) client (0) connect to server successfuly! Client[0] in group[0] is exiting... Server (1) shutdown. Server is exiting... Server (0) shutdown. Server is exiting... PASSED [ 16%] tests/distributed/test_distributed_sampling.py::test_rpc_get_degree_shuffle[1] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.023 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 1 partitions. /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_get_degrees start graph service on server 0 for part 0 [05:31:13] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:13] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:27870]... [05:31:13] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:13] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
Requirement already satisfied: six>=1.12.0 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from ogb) (1.16.0) Requirement already satisfied: tqdm>=4.29.0 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from ogb) (4.64.0) Requirement already satisfied: zipp>=0.5 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from importlib-metadata>=0.12->pytest) (3.8.1) Requirement already satisfied: requests in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from outdated>=0.2.0->ogb) (2.28.1) Collecting littleutils Using cached littleutils-0.2.2-py3-none-any.whl Requirement already satisfied: scipy>=1.1.0 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.7.3) Collecting threadpoolctl>=2.0.0 Using cached threadpoolctl-3.1.0-py3-none-any.whl (14 kB) Requirement already satisfied: joblib>=0.11 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from scikit-learn>=0.20.0->ogb) (1.1.0) Requirement already satisfied: idna<4,>=2.5 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (3.3) Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2022.6.15) Requirement already satisfied: charset-normalizer<3,>=2 in /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages (from requests->outdated>=0.2.0->ogb) (2.1.0) Installing collected packages: pytz, littleutils, torch, threadpoolctl, pyyaml, pydantic, psutil, isodate, scikit-learn, rdflib, pandas, outdated, ogb PASSED [ 9%] tests/compute/test_heterograph.py::test_updates[idtype1] Client [1090] waits on 172.17.0.3:36663 Machine (0) group (0) client (0) connect to server successfuly! Client[0] in group[0] is exiting... Done get_degreeServer (0) shutdown. Server is exiting... PASSED [ 9%] tests/compute/test_heterograph.py::test_backward[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_backward[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_empty_heterograph[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_empty_heterograph[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_types_in_function[idtype0] check results PASSED [ 18%] tests/distributed/test_distributed_sampling.py::test_rpc_get_degree_shuffle[2] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:31:17] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 2 parts and get 260 edge cuts Metis partitioning: 0.003 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.003 seconds Splitting the graph into partitions takes 0.007s, peak mem: 1.529 GB part 0 has 1512 nodes and 1354 are inside the partition part 0 has 5630 edges and 5370 are inside the partition part 1 has 1552 nodes and 1354 are inside the partition part 1 has 5446 edges and 5186 are inside the partition Save partitions: 0.021 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 2 partitions. 
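Each distributed test above follows the same lifecycle: a graph service starts per partition, a client connects and runs its checks, then client and servers shut down. A hedged sketch of the client-side pattern only; it assumes partition files, an ip_config.txt, and already-launched servers, so it is not runnable in isolation, and all names and paths are placeholders:

    # Hedged sketch of the client side of the server/client runs above.
    # Prerequisites (launched servers, partition files, ip_config.txt)
    # are assumed and not shown.
    import dgl
    import dgl.distributed

    dgl.distributed.initialize("ip_config.txt")    # server IP/port list
    g = dgl.distributed.DistGraph(
        "toy",                                     # hypothetical graph name
        part_config="/tmp/toy_parts/toy.json",     # from partition_graph
    )
    print(g.num_nodes())                           # answered over RPC by the servers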
PASSED [ 9%] tests/compute/test_heterograph.py::test_types_in_function[idtype1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_get_degrees start graph service on server 0 for part 0 [05:31:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:12848]... PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[idtype1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_get_degrees start graph service on server 1 for part 1 [05:31:19] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:19] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:12850]... PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[idtype0] [05:31:19] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:19] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Successfully installed dataclasses-0.8 isodate-0.6.1 littleutils-0.2.2 ogb-1.3.4 outdated-0.2.1 pandas-1.1.5 psutil-5.9.2 pydantic-1.9.2 pytz-2022.2.1 pyyaml-6.0 rdflib-5.0.0 scikit-learn-0.24.2 threadpoolctl-3.1.0 torch-1.10.2 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ============================= test session starts ============================== platform linux -- Python 3.6.9, pytest-7.0.1, pluggy-1.0.0 -- /opt/conda/envs/mxnet-ci/bin/python3 cachedir: .pytest_cache rootdir: /root/jenkins/workspace/dgl_PR-4648@4 collecting ... PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_dtype_cast[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_dtype_cast[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_float_cast PASSED [ 9%] tests/compute/test_heterograph.py::test_format[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_format[idtype1] Client [1090] waits on 172.17.0.3:34321 Machine (0) group (0) client (0) connect to server successfuly! Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... 
Done get_degree check results PASSED [ 20%] tests/distributed/test_distributed_sampling.py::test_rpc_sampling SKIPPED [ 21%] tests/distributed/test_distributed_sampling.py::test_rpc_sampling_shuffle[1] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.026 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 1 partitions. PASSED [ 10%] tests/compute/test_heterograph.py::test_edges_order[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_edges_order[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_reverse[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_reverse[idtype1] PASSED [ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[idtype0] PASSED [ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[idtype1] PASSED [ 5%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax_unidirectional FAILED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype0-src-g0] FAILED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype0-dst-g0] FAILED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype1-src-g0] FAILED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype1-dst-g0] FAILED [ 6%] tests/compute/test_ffi.py::test_cython PASSED [ 6%] tests/compute/test_ffi.py::test_callback[1] PASSED [ 6%] tests/compute/test_ffi.py::test_callback[2.3] PASSED [ 6%] tests/compute/test_ffi.py::test_callback_thread[1] PASSED [ 6%] tests/compute/test_ffi.py::test_callback_thread[2.3] PASSED [ 6%] tests/compute/test_filter.py::test_graph_filter PASSED [ 6%] tests/compute/test_filter.py::test_array_filter[idtype0] SKIPPED (CP...) [ 6%] tests/compute/test_filter.py::test_array_filter[idtype1] SKIPPED (CP...) [ 6%] tests/compute/test_frame.py::test_column_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_plain PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_dtype PASSED [ 6%] tests/compute/test_generators.py::test_rand_graph PASSED [ 6%] tests/compute/test_graph.py::test_query /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:31:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:17223]... 
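The test_rpc_sampling* cases drive neighbor sampling through the graph service whose start-up and shutdown messages are interleaved above. The single-process analogue of that operation, sketched with an arbitrary fanout on a random graph rather than the test's distributed setup:

    import dgl
    import torch

    g = dgl.rand_graph(100, 500)
    seeds = torch.tensor([0, 1, 2])
    # Sample at most 5 incoming edges per seed; the result is a subgraph
    # containing only the sampled edges.
    sg = dgl.sampling.sample_neighbors(g, seeds, fanout=5)
    print(sg.num_edges())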
PASSED [ 10%] tests/compute/test_heterograph.py::test_clone[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_clone[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_edges[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_edges[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_nodes[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_nodes[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_edges[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_edges[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_nodes[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_nodes[idtype1] [05:31:24] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:24] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 10%] tests/compute/test_heterograph.py::test_frame[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame_device[idtype0] SKIPPED [ 10%] tests/compute/test_heterograph.py::test_frame_device[idtype1] SKIPPED [ 10%] tests/compute/test_heterograph.py::test_create_block[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_create_block[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[coo-idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[coo-idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csr-idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csr-idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csc-idtype0] PASSED [ 11%] tests/compute/test_heterograph.py::test_adj_sparse[csc-idtype1] PASSED [ 11%] tests/compute/test_heterograph.py::test_forking_pickler collected 2453 items tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[int32] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[int64] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[int32] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[int64] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[int32] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[int64] SKIPPED [ 0%] tests/compute/test_backend.py::test_set_default_backend PASSED [ 0%] tests/compute/test_basics.py::test_compatible PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_getter[int32] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_getter[int64] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[int32] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[int64] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[int32] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[int64] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[int32] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[int64] PASSED [ 0%] tests/compute/test_basics.py::test_update_routines[int32] PASSED [ 0%] tests/compute/test_basics.py::test_update_routines[int64] PASSED [ 6%] tests/compute/test_graph.py::test_mutation PASSED [ 6%] tests/compute/test_graph.py::test_scipy_adjmat PASSED [ 6%] tests/compute/test_graph.py::test_incmat PASSED [ 6%] tests/compute/test_graph.py::test_find_edges PASSED [ 6%] tests/compute/test_graph.py::test_ismultigraph PASSED 
[ 6%] tests/compute/test_graph.py::test_hypersparse_query PASSED [ 6%] tests/compute/test_graph.py::test_empty_data_initialized PASSED [ 7%] tests/compute/test_graph.py::test_is_sorted PASSED [ 7%] tests/compute/test_graph.py::test_default_types PASSED [ 7%] tests/compute/test_graph.py::test_formats PASSED [ 7%] tests/compute/test_heterograph.py::test_create[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_create[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_create2 PASSED [ 7%] tests/compute/test_heterograph.py::test_query[idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_update_all_0deg[int32] PASSED [ 0%] tests/compute/test_basics.py::test_update_all_0deg[int64] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[int32] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[int64] PASSED [ 0%] tests/compute/test_basics.py::test_dynamic_addition PASSED [ 0%] tests/compute/test_basics.py::test_repr[int32] PASSED [ 0%] tests/compute/test_basics.py::test_repr[int64] PASSED [ 1%] tests/compute/test_basics.py::test_local_var[int32] PASSED [ 1%] tests/compute/test_basics.py::test_local_var[int64] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[int32] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[int64] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[int32] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[int64] PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[int32] Client [1403] waits on 172.17.0.3:44143 Machine (0) group (0) client (0) connect to server successfuly! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.004 seconds, peak memory: 1.529 GB There are 3060 edges in the graph and 0 edge cuts for 1 partitions. PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[int64] PASSED [ 1%] tests/compute/test_basics.py::test_issue_1088[int32] PASSED [ 1%] tests/compute/test_basics.py::test_issue_1088[int64] PASSED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_query[idtype1] PASSED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[int64] PASSED [ 1%] tests/compute/test_basics.py::test_issue_2484[int32] PASSED [ 11%] tests/compute/test_index.py::test_dlpack SKIPPED (TF doesn't support...) [ 11%] tests/compute/test_kernel.py::test_copy_src_reduce /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:31:29] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:29] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:24719]... 
PASSED [ 1%] tests/compute/test_basics.py::test_issue_2484[int64] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[int32] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[int64] [05:31:29] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:29] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch1[int32] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch1[int64] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[int32] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[int64] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[int32] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[int64] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_send_and_recv[int32] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_send_and_recv[int64] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_propagate[int32] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_propagate[int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_empty_query[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_empty_query[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_adj[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_adj[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_inc[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_inc[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_view[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_view[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_view1[idtype0] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[int32] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[int64] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[int32] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[int64] Client [1090] waits on 172.17.0.3:50765 Machine (0) group (0) client (0) connect to server successfuly! Client[0] in group[0] is exiting... Server (0) shutdown. Done sampling Server is exiting... Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.001 seconds, peak memory: 1.529 GB There are 3000 edges in the graph and 0 edge cuts for 1 partitions. PASSED [ 7%] tests/compute/test_heterograph.py::test_view1[idtype1] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[int32] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[int64] PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[int32] PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:31:33] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:33] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13648]...
[05:31:33] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:33] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 7%] tests/compute/test_heterograph.py::test_flatten[idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_flatten[idtype1] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device[idtype0] SKIPPED (...) [ 7%] tests/compute/test_heterograph.py::test_to_device[idtype1] SKIPPED (...) [ 7%] tests/compute/test_heterograph.py::test_to_device2[g0-idtype0] SKIPPED [ 7%] tests/compute/test_heterograph.py::test_to_device2[g0-idtype1] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_to_device2[g1-idtype0] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_to_device2[g1-idtype1] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_pin_memory_[idtype0] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_pin_memory_[idtype1] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_convert_bound[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert_bound[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[int32-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[int32-gs1] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo_zero_nodes[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo_zero_nodes[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo2[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo2[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_invertible_conversion[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_invertible_conversion[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_metagraph_reachable[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[int64-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[int64-gs1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[int32] PASSED [ 8%] tests/compute/test_heterograph.py::test_metagraph_reachable[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph_mask[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[int64] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[int32] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph_mask[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_apply[idtype0] PASSED [ 8%] tests/compute/test_heterograph.py::test_apply[idtype1] PASSED [ 8%] tests/compute/test_heterograph.py::test_level2[idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[int64] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[int32] SKIPPED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[int64] SKIPPED [ 2%] tests/compute/test_batched_heterograph.py::test_unbatch2[int32] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_unbatch2[int64] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[int32] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[int64] PASSED [ 2%] 
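The test_convert / test_to_homo* / test_invertible_conversion cases in this stretch cover round-tripping between the typed (heterogeneous) and flat (homogeneous) graph representations. A minimal sketch of that round trip, with invented type names:

    import dgl
    import torch

    hg = dgl.heterograph({
        ('user', 'follows', 'user'): (torch.tensor([0]), torch.tensor([1])),
        ('user', 'plays', 'game'): (torch.tensor([1]), torch.tensor([0])),
    })
    # Flatten to one node/edge type; original type ids land in _TYPE fields.
    g = dgl.to_homogeneous(hg)
    # Recover the typed view from those fields.
    hg2 = dgl.to_heterogeneous(g, hg.ntypes, hg.etypes)
    print(hg2)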
tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[int32] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[int64] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_netypes PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[float32-int64] PASSED [ 8%] tests/compute/test_heterograph.py::test_level2[idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[float64-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[float64-int64] Successfully installed isodate-0.6.1 littleutils-0.2.2 ogb-1.3.4 outdated-0.2.1 pandas-1.1.5 psutil-5.9.2 pydantic-1.10.2 pytz-2022.2.1 pyyaml-6.0 rdflib-6.2.0 scikit-learn-1.0.2 threadpoolctl-3.1.0 torch-1.12.1 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ============================= test session starts ============================== platform linux -- Python 3.7.0, pytest-7.1.2, pluggy-1.0.0 -- /opt/conda/envs/mxnet-ci/bin/python3 cachedir: .pytest_cache rootdir: /root/jenkins/workspace/dgl_PR-4648 collecting ... PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-float32-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-float64-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-float64-int64] Client [1090] waits on 172.17.0.3:43441 Machine (0) group (0) client (0) connect to server successfuly! Client[0] in group[0] is exiting... Done sampling Server (0) shutdown. Server is exiting... Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.002 seconds, peak memory: 1.529 GB There are 13220 edges in the graph and 0 edge cuts for 1 partitions. PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-float32-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-float64-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-float64-int64] PASSED [ 8%] tests/compute/test_heterograph.py::test_more_nnz[idtype0] SKIPPED (N...) [ 9%] tests/compute/test_heterograph.py::test_more_nnz[idtype1] SKIPPED (N...) 
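An earlier pip transcript in this log installs torch-1.10.2 while the one just above installs torch-1.12.1, so the matrix exercises both sides of the PyTorch 1.12 boundary. Code that must differ across such a boundary is commonly gated on the parsed version; a hedged sketch where both branches are placeholders, not code from this PR:

    import torch
    from packaging import version  # ships with most pip-based environments

    if version.parse(torch.__version__).release < (1, 12):
        # Fallback path for PyTorch older than 1.12 (placeholder).
        pass
    else:
        # Path relying on 1.12+ behaviour (placeholder).
        pass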
[ 9%] tests/compute/test_heterograph.py::test_updates[idtype0] FAILED [ 9%] tests/compute/test_heterograph.py::test_updates[idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[float32-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[float64-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[float64-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-float32-int64] FAILED [ 9%] tests/compute/test_heterograph.py::test_backward[idtype0] FAILED [ 9%] tests/compute/test_heterograph.py::test_backward[idtype1] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-float64-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-float64-int64] FAILED [ 9%] tests/compute/test_heterograph.py::test_empty_heterograph[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_empty_heterograph[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_types_in_function[idtype0] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:31:37] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:37] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13648]... [05:31:37] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:37] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-float32-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-float64-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrsum_backward[2-float64-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-float32-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-float32-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-float64-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-float64-int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_types_in_function[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[idtype1] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-float32-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-float32-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-float64-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-float64-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-float32-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-float32-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-float64-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-float64-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-float32-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-float32-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-float64-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-float64-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[float32-int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_dtype_cast[idtype0] PASSED [ 9%] tests/compute/test_heterograph.py::test_dtype_cast[idtype1] PASSED [ 9%] tests/compute/test_heterograph.py::test_float_cast PASSED [ 9%] tests/compute/test_heterograph.py::test_format[idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[float32-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[float64-int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_format[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_edges_order[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_edges_order[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_reverse[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_reverse[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_clone[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_clone[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_edges[idtype0] PASSED [ 10%] 
tests/compute/test_heterograph.py::test_add_edges[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_nodes[idtype0] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[float64-int64] PASSED [ 4%] tests/compute/test_data.py::test_minigc SKIPPED (Skip MXNet) [ 4%] tests/compute/test_data.py::test_gin SKIPPED (Skip MXNet) [ 4%] tests/compute/test_data.py::test_fraud SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_fakenews SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_tudataset_regression SKIPPED (Skip ...) [ 5%] tests/compute/test_data.py::test_data_hash SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_citation_graph SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_gnn_benchmark SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_reddit SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_explain_syn SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_wiki_cs SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_yelp SKIPPED (Dataset too large to ...) [ 5%] tests/compute/test_data.py::test_flickr SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_extract_archive SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_csvdataset SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_add_nodepred_split SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_as_nodepred1 SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_as_nodepred2 SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_as_nodepred_ogb SKIPPED (ogb only s...) [ 5%] tests/compute/test_data.py::test_as_linkpred SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_as_linkpred_ogb SKIPPED (ogb only s...) [ 5%] tests/compute/test_data.py::test_as_nodepred_csvdataset SKIPPED (Ski...) [ 5%] tests/compute/test_data.py::test_as_graphpred SKIPPED (Skip MXNet) [ 5%] tests/compute/test_data.py::test_as_graphpred_reprocess SKIPPED (Ski...) [ 5%] tests/compute/test_data.py::test_as_graphpred_ogb SKIPPED (ogb only ...) 
[ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[int32] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_nodes[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_edges[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_edges[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_nodes[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_nodes[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_frame_device[idtype0] SKIPPED [ 10%] tests/compute/test_heterograph.py::test_frame_device[idtype1] SKIPPED [ 10%] tests/compute/test_heterograph.py::test_create_block[idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_create_block[idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[coo-idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[coo-idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csr-idtype0] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csr-idtype1] PASSED [ 10%] tests/compute/test_heterograph.py::test_adj_sparse[csc-idtype0] PASSED [ 11%] tests/compute/test_heterograph.py::test_adj_sparse[csc-idtype1] PASSED [ 11%] tests/compute/test_heterograph.py::test_forking_pickler PASSED [ 11%] tests/compute/test_kernel.py::test_copy_edge_reduce PASSED [ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[int64] PASSED [ 5%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax_unidirectional SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[int32-src-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[int32-dst-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[int64-src-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[int64-dst-g0] SKIPPED [ 6%] tests/compute/test_filter.py::test_graph_filter PASSED [ 6%] tests/compute/test_filter.py::test_array_filter[int32] SKIPPED (CPU ...) [ 6%] tests/compute/test_filter.py::test_array_filter[int64] SKIPPED (CPU ...) [ 6%] tests/compute/test_frame.py::test_column_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_plain PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_dtype PASSED [ 6%] tests/compute/test_generators.py::test_rand_graph PASSED [ 6%] tests/compute/test_graph.py::test_query PASSED [ 11%] tests/compute/test_index.py::test_dlpack PASSED [ 11%] tests/compute/test_kernel.py::test_copy_src_reduce FAILED [ 11%] tests/compute/test_kernel.py::test_copy_edge_reduce FAILED [ 11%] tests/compute/test_kernel.py::test_all_binary_builtins FAILED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-idtype0] FAILED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-idtype1] FAILED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[idtype0] PASSED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[idtype1] PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_id SKIPPED (NCCL only runs on ...) 
[ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_remainder SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_remainder SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_range SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_range SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_support SKIPPED (NCCL only run...) [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype0] FAILED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype1] FAILED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype0] Client [1090] waits on 172.17.0.3:58957 Machine (0) group (0) client (0) connect to server successfuly! Client[0] in group[0] is exiting... FAILED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype1] FAILED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype0] FAILED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype1] FAILED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[idtype0] SKIPPED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_index PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph_index PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g0-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g0-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g1-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g1-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g5-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g5-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g6-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g6-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g11-idtype0] PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g11-idtype1] PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g12-idtype0] PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g12-idtype1] PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_batched_heterograph PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_subgraph PASSED [ 13%] 
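The long run of test_pickling_graph[g*] cases above checks that graphs built in many different ways survive a pickle round trip, features included. The property under test, in miniature:

    import pickle
    import dgl
    import torch

    g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])))
    g.ndata['h'] = torch.randn(3, 4)

    g2 = pickle.loads(pickle.dumps(g))
    assert g2.num_nodes() == g.num_nodes()
    assert torch.equal(g2.ndata['h'], g.ndata['h'])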
tests/compute/test_pickle.py::test_pickling_is_pinned[idtype0] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_is_pinned[idtype1] SKIPPED [ 13%] tests/compute/test_pin_memory.py::test_pin_unpin SKIPPED (Need gpu f...) [ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[idtype0] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[idtype1] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[idtype0] Done sampling Server (0) shutdown. Server is exiting... Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.002 seconds, peak memory: 1.529 GB There are 13220 edges in the graph and 0 edge cuts for 1 partitions. PASSED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[idtype1] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype0] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:31:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13648]... [05:31:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 13%] tests/compute/test_random.py::test_random_choice PASSED [ 13%] tests/compute/test_readout.py::test_sum_case1[idtype0] FAILED [ 13%] tests/compute/test_readout.py::test_sum_case1[idtype1] FAILED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-idtype0] FAILED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-idtype1] FAILED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-idtype0] FAILED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-idtype1] PASSED [ 6%] tests/compute/test_graph.py::test_mutation PASSED [ 6%] tests/compute/test_graph.py::test_scipy_adjmat PASSED [ 6%] tests/compute/test_graph.py::test_incmat PASSED [ 6%] FAILED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-idtype0] FAILED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-idtype1] FAILED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-idtype0] FAILED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-idtype1] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-idtype0] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-idtype1] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-idtype0] tests/compute/test_graph.py::test_find_edges PASSED [ 6%] tests/compute/test_graph.py::test_ismultigraph PASSED [ 6%] tests/compute/test_graph.py::test_hypersparse_query PASSED [ 6%] tests/compute/test_graph.py::test_empty_data_initialized PASSED [ 6%] tests/compute/test_graph.py::test_is_sorted PASSED [ 6%] tests/compute/test_graph.py::test_default_types PASSED [ 6%] tests/compute/test_graph.py::test_formats PASSED [ 6%] tests/compute/test_heterograph.py::test_create[int32] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-idtype1]
FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-idtype0] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-idtype1] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-idtype0] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-idtype1] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-idtype0] PASSED [ 7%] tests/compute/test_heterograph.py::test_create[int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_create2 PASSED [ 7%] tests/compute/test_heterograph.py::test_query[int32] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-idtype1] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-idtype0] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-idtype1] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-idtype0] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-idtype1] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-idtype0] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-idtype1] PASSED [ 11%] tests/compute/test_kernel.py::test_all_binary_builtins FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-idtype0] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-idtype1] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-idtype0] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-idtype1] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-idtype0] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-idtype1] FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-idtype0] collected 2453 items tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[int32] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[int64] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[int32] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_unary_copy_e[int64] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[int32] SKIPPED [ 0%] tests/compute/test_apply_edges_hetero.py::test_binary_op[int64] SKIPPED [ 0%] tests/compute/test_backend.py::test_set_default_backend PASSED [ 0%] tests/compute/test_basics.py::test_compatible FAILED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g2-idtype0] FAILED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g2-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g3-idtype0] FAILED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g3-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g4-idtype0] FAILED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g4-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-idtype0] FAILED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-idtype0] FAILED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-idtype0] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-idtype1] PASSED [ 0%] 
tests/compute/test_basics.py::test_batch_setter_getter[int32] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype0] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype0] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype0] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype0] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_getter[int64] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[int32] PASSED [ 0%] tests/compute/test_basics.py::test_batch_setter_autograd[int64] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[int32] PASSED [ 0%] tests/compute/test_basics.py::test_apply_nodes[int64] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[int32] PASSED [ 0%] tests/compute/test_basics.py::test_apply_edges[int64] PASSED [ 0%] tests/compute/test_basics.py::test_update_routines[int32] PASSED [ 0%] tests/compute/test_basics.py::test_update_routines[int64] PASSED [ 0%] tests/compute/test_basics.py::test_update_all_0deg[int32] PASSED [ 0%] tests/compute/test_basics.py::test_update_all_0deg[int64] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[int32] PASSED [ 0%] tests/compute/test_basics.py::test_pull_0deg[int64] PASSED [ 0%] tests/compute/test_basics.py::test_dynamic_addition PASSED [ 0%] tests/compute/test_basics.py::test_repr[int32] PASSED [ 0%] tests/compute/test_basics.py::test_repr[int64] PASSED [ 1%] tests/compute/test_basics.py::test_local_var[int32] PASSED [ 1%] tests/compute/test_basics.py::test_local_var[int64] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[int32] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype0] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype0] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype1] FAILED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-idtype0] PASSED [ 1%] tests/compute/test_basics.py::test_local_scope[int64] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[int32] PASSED [ 1%] tests/compute/test_basics.py::test_isolated_nodes[int64] PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[int32] PASSED [ 1%] tests/compute/test_basics.py::test_send_multigraph[int64] PASSED [ 1%] tests/compute/test_basics.py::test_issue_1088[int32] PASSED [ 1%] tests/compute/test_basics.py::test_issue_1088[int64] PASSED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[int32] PASSED [ 1%] tests/compute/test_basics.py::test_degree_bucket_edge_ordering[int64] PASSED [ 1%] tests/compute/test_basics.py::test_issue_2484[int32] PASSED [ 1%] tests/compute/test_basics.py::test_issue_2484[int64] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[int32] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch[int64] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch1[int32] PASSED [ 1%] 
tests/compute/test_batched_graph.py::test_batch_unbatch1[int64] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[int32] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch_frame[int64] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[int32] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_unbatch2[int64] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-idtype0] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-idtype0] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-idtype0] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-idtype0] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_send_and_recv[int32] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_send_and_recv[int64] PASSED [ 1%] tests/compute/test_batched_graph.py::test_batch_propagate[int32] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_propagate[int64] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[int32] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batched_edge_ordering[int64] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[int32] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_no_edge[int64] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[int32] PASSED [ 2%] tests/compute/test_batched_graph.py::test_batch_keeps_empty_data[int64] PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[int32] PASSED [ 2%] tests/compute/test_batched_graph.py::test_set_batch_info[int64] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[int32-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[int32-gs1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype0] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype0] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[int64-gs0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_topology[int64-gs1] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[int32] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batching_batched[int64] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[int32] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_features[int64] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[int32] SKIPPED [ 2%] tests/compute/test_batched_heterograph.py::test_empty_relation[int64] SKIPPED [ 2%] tests/compute/test_batched_heterograph.py::test_unbatch2[int32] PASSED [ 2%] 
tests/compute/test_batched_heterograph.py::test_unbatch2[int64] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[int32] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_slice_batch[int64] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype0] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype0] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype0] PASSED [ 2%] tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[int32] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_keeps_empty_data[int64] PASSED [ 3%] tests/compute/test_batched_heterograph.py::test_batch_netypes SKIPPED [ 3%] tests/compute/test_csrmm.py::test_csrmm[float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[float32-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[float64-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm[float64-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-float32-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-float64-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[1-float64-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-float32-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-float64-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrmm_backward[2-float64-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[float32-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[float64-int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_query[int64] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype0] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype1] FAILED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype0] FAILED [ 17%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype1] FAILED [ 17%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype0] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum[float64-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-float32-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-float64-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[1-float64-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-float32-int32] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-float32-int64] PASSED [ 3%] tests/compute/test_csrmm.py::test_csrsum_backward[2-float64-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrsum_backward[2-float64-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-float32-int32] PASSED [ 4%] 
tests/compute/test_csrmm.py::test_csrmask[9000-9000-float32-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-float64-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-9000-float64-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-float32-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-float32-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-float64-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[9000-0-float64-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-float32-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-float32-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-float64-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-9000-float64-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-float32-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-float32-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-float64-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask[0-0-float64-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[float32-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[float32-int64] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[float64-int32] PASSED [ 4%] tests/compute/test_csrmm.py::test_csrmask_backward[float64-int64] Client [1090] waits on 172.17.0.3:37923 Machine (0) group (0) client (0) connect to server successfuly! Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Done sampling Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.002 seconds, peak memory: 1.529 GB There are 11990 edges in the graph and 0 edge cuts for 1 partitions. FAILED [ 17%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype1] FAILED [ 17%] tests/compute/test_readout.py::test_topk[True-g0-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g0-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g1-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g1-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g2-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g2-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g3-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g3-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g4-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g4-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g5-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g5-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g6-idtype0] PASSED [ 4%] tests/compute/test_data.py::test_minigc SKIPPED (Datasets don't need...) [ 4%] tests/compute/test_data.py::test_gin SKIPPED (Datasets don't need to...) [ 4%] tests/compute/test_data.py::test_fraud SKIPPED (Datasets don't need ...) [ 5%] tests/compute/test_data.py::test_fakenews SKIPPED (Datasets don't ne...) [ 5%] tests/compute/test_data.py::test_tudataset_regression SKIPPED (Datas...) [ 5%] tests/compute/test_data.py::test_data_hash SKIPPED (Datasets don't n...) [ 5%] tests/compute/test_data.py::test_citation_graph SKIPPED (Datasets do...) [ 5%] tests/compute/test_data.py::test_gnn_benchmark SKIPPED (Datasets don...) 
[ 5%] tests/compute/test_data.py::test_reddit SKIPPED (Datasets don't need...) [ 5%] tests/compute/test_data.py::test_explain_syn SKIPPED (Datasets don't...) [ 5%] tests/compute/test_data.py::test_wiki_cs SKIPPED (Datasets don't nee...) [ 5%] tests/compute/test_data.py::test_yelp SKIPPED (Dataset too large to ...) [ 5%] tests/compute/test_data.py::test_flickr SKIPPED (Datasets don't need...) [ 5%] tests/compute/test_data.py::test_extract_archive SKIPPED (Datasets d...) [ 5%] tests/compute/test_data.py::test_csvdataset SKIPPED (Datasets don't ...) [ 5%] tests/compute/test_data.py::test_add_nodepred_split SKIPPED (Dataset...) [ 5%] tests/compute/test_data.py::test_as_nodepred1 SKIPPED (Datasets don'...) [ 5%] tests/compute/test_data.py::test_as_nodepred2 SKIPPED (Datasets don'...) [ 5%] tests/compute/test_data.py::test_as_nodepred_ogb SKIPPED (ogb only s...) [ 5%] tests/compute/test_data.py::test_as_linkpred SKIPPED (Datasets don't...) [ 5%] tests/compute/test_data.py::test_as_linkpred_ogb SKIPPED (ogb only s...) [ 5%] tests/compute/test_data.py::test_as_nodepred_csvdataset SKIPPED (Dat...) [ 5%] tests/compute/test_data.py::test_as_graphpred SKIPPED (Datasets don'...) [ 5%] tests/compute/test_data.py::test_as_graphpred_reprocess SKIPPED (Dat...) [ 5%] tests/compute/test_data.py::test_as_graphpred_ogb SKIPPED (ogb only ...) [ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[int32] PASSED [ 5%] tests/compute/test_dataloader.py::test_edge_prediction_sampler[int64] PASSED [ 5%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax_unidirectional SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[int32-src-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[int32-dst-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[int64-src-g0] SKIPPED [ 6%] tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[int64-dst-g0] SKIPPED [ 6%] tests/compute/test_filter.py::test_graph_filter PASSED [ 6%] tests/compute/test_filter.py::test_array_filter[int32] PASSED [ 6%] tests/compute/test_filter.py::test_array_filter[int64] PASSED [ 6%] tests/compute/test_frame.py::test_column_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_plain PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_subcolumn PASSED [ 6%] tests/compute/test_frame.py::test_serialize_deserialize_dtype PASSED [ 6%] tests/compute/test_generators.py::test_rand_graph SKIPPED (GPU rando...) 
[ 6%] tests/compute/test_graph.py::test_query PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g6-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g0-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g0-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g1-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g1-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g2-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g2-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g4-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g4-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g5-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g5-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g0-idtype0] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g0-idtype1] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g1-idtype0] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g1-idtype1] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g2-idtype0] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g2-idtype1] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g3-idtype0] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g3-idtype1] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g4-idtype0] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g4-idtype1] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g5-idtype0] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g5-idtype1] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g6-idtype0] FAILED [ 18%] tests/compute/test_readout.py::test_softmax[g6-idtype1] FAILED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g1-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g1-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g2-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g2-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g3-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g3-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g4-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g4-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_edge_removal[idtype0] PASSED [ 19%] 
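The failures above are concentrated in tests/compute/test_readout.py: test_softmax and the weighted mean variant of test_weighted_reduce_readout. For orientation, DGL's readout family collapses per-node features into one vector per graph of a batch; a minimal usage sketch with toy graphs (illustrative only, not the test fixture):

    import dgl
    import torch

    g1 = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])))  # 3 nodes
    g2 = dgl.graph((torch.tensor([0]), torch.tensor([1])))        # 2 nodes
    bg = dgl.batch([g1, g2])
    bg.ndata['h'] = torch.randn(bg.num_nodes(), 4)
    bg.ndata['w'] = torch.rand(bg.num_nodes(), 1)

    mean    = dgl.mean_nodes(bg, 'h')               # shape (2, 4), one row per graph
    w_mean  = dgl.mean_nodes(bg, 'h', weight='w')   # weighted mean, as in test_weighted_reduce_readout
    softmax = dgl.softmax_nodes(bg, 'h')            # per-graph softmax over nodes, as in test_softmax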
tests/compute/test_removal.py::test_multigraph_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_and_edge_removal[idtype0] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:31:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13648]... PASSED [ 19%] tests/compute/test_removal.py::test_node_and_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_frame[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_node_frame[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_edge_frame[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_edge_frame[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_issue1287[idtype0] PASSED [ 20%] tests/compute/test_removal.py::test_issue1287[idtype1] PASSED [ 20%] tests/compute/test_sampler.py::test_create_full PASSED [ 20%] tests/compute/test_sampler.py::test_1neighbor_sampler_all PASSED [ 20%] tests/compute/test_sampler.py::test_1neighbor_sampler [05:31:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
PASSED [ 20%] tests/compute/test_sampler.py::test_prefetch_neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_10neighbor_sampler_all PASSED [ 6%] tests/compute/test_graph.py::test_mutation PASSED [ 6%] tests/compute/test_graph.py::test_scipy_adjmat PASSED [ 6%] tests/compute/test_graph.py::test_incmat PASSED [ 6%] tests/compute/test_graph.py::test_find_edges PASSED [ 6%] tests/compute/test_graph.py::test_ismultigraph PASSED [ 6%] tests/compute/test_graph.py::test_hypersparse_query PASSED [ 6%] tests/compute/test_graph.py::test_empty_data_initialized PASSED [ 6%] tests/compute/test_graph.py::test_is_sorted PASSED [ 6%] tests/compute/test_graph.py::test_default_types PASSED [ 6%] tests/compute/test_graph.py::test_formats PASSED [ 6%] tests/compute/test_heterograph.py::test_create[int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_create[int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_create2 PASSED [ 7%] tests/compute/test_heterograph.py::test_query[int32] PASSED [ 20%] tests/compute/test_sampler.py::test_10neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_layer_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_setseed PASSED [ 20%] tests/compute/test_sampler.py::test_negative_sampler PASSED [ 7%] tests/compute/test_heterograph.py::test_query[int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_empty_query[int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_empty_query[int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_adj[int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_adj[int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_inc[int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_inc[int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_view[int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_view[int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_view1[int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_view1[int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_flatten[int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_flatten[int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device[int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device[int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device2[g0-int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device2[g0-int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device2[g1-int32] PASSED [ 7%] tests/compute/test_heterograph.py::test_to_device2[g1-int64] PASSED [ 7%] tests/compute/test_heterograph.py::test_pin_memory_[int32] SKIPPED (...) [ 7%] tests/compute/test_heterograph.py::test_pin_memory_[int64] SKIPPED (...) 
[ 7%]
tests/compute/test_heterograph.py::test_convert_bound[int32] PASSED [ 8%]
tests/compute/test_heterograph.py::test_convert_bound[int64] PASSED [ 8%]
tests/compute/test_heterograph.py::test_convert[int32] PASSED [ 8%]
tests/compute/test_heterograph.py::test_convert[int64] PASSED [ 8%]
tests/compute/test_heterograph.py::test_to_homo_zero_nodes[int32] SKIPPED [ 8%]
tests/compute/test_heterograph.py::test_to_homo_zero_nodes[int64] SKIPPED [ 8%]
tests/compute/test_heterograph.py::test_to_homo2[int32] PASSED [ 8%]
tests/compute/test_heterograph.py::test_to_homo2[int64] PASSED [ 8%]
tests/compute/test_heterograph.py::test_invertible_conversion[int32] PASSED [ 8%]
tests/compute/test_heterograph.py::test_invertible_conversion[int64] PASSED [ 8%]
tests/compute/test_heterograph.py::test_metagraph_reachable[int32] PASSED [ 8%]
tests/compute/test_heterograph.py::test_metagraph_reachable[int64] PASSED [ 8%]
tests/compute/test_heterograph.py::test_subgraph_mask[int32] SKIPPED [ 8%]
tests/compute/test_heterograph.py::test_subgraph_mask[int64] SKIPPED [ 8%]
tests/compute/test_heterograph.py::test_subgraph[int32] PASSED [ 8%]
tests/compute/test_heterograph.py::test_subgraph[int64] PASSED [ 8%]
tests/compute/test_heterograph.py::test_apply[int32] PASSED [ 8%]
tests/compute/test_heterograph.py::test_apply[int64] PASSED [ 8%]
tests/compute/test_heterograph.py::test_level2[int32] PASSED [ 8%]
tests/compute/test_heterograph.py::test_level2[int64]
Client [1090] waits on 172.17.0.3:34041
Machine (0) group (0) client (0) connect to server successfully!
Client[0] in group[0] is exiting...
Server (0) shutdown. Server is exiting...
Done sampling
Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB
Save partitions: 0.001 seconds, peak memory: 1.529 GB
There are 1000 edges in the graph and 0 edge cuts for 1 partition.
PASSED [ 8%] tests/compute/test_heterograph.py::test_more_nnz[int32] PASSED [ 8%] tests/compute/test_heterograph.py::test_more_nnz[int64] PASSED [ 8%] tests/compute/test_heterograph.py::test_updates[int32] PASSED [ 8%] tests/compute/test_heterograph.py::test_updates[int64] PASSED [ 8%] tests/compute/test_heterograph.py::test_backward[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_backward[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_empty_heterograph[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_empty_heterograph[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_types_in_function[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_types_in_function[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_dtype_cast[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_dtype_cast[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_float_cast PASSED [ 9%] tests/compute/test_heterograph.py::test_format[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_format[int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:31:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13648]... [05:31:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:31:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
PASSED [ 9%]
tests/compute/test_heterograph.py::test_edges_order[int32] PASSED [ 9%]
tests/compute/test_heterograph.py::test_edges_order[int64] PASSED [ 9%]
tests/compute/test_heterograph.py::test_reverse[int32] PASSED [ 9%]
tests/compute/test_heterograph.py::test_reverse[int64] PASSED [ 9%]
tests/compute/test_heterograph.py::test_clone[int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_clone[int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_add_edges[int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_add_edges[int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_add_nodes[int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_add_nodes[int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_remove_edges[int32] SKIPPED [ 10%]
tests/compute/test_heterograph.py::test_remove_edges[int64] SKIPPED [ 10%]
tests/compute/test_heterograph.py::test_remove_nodes[int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_remove_nodes[int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_frame[int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_frame[int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_frame_device[int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_frame_device[int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_create_block[int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_create_block[int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[coo-int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[coo-int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[csr-int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[csr-int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[csc-int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[csc-int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_forking_pickler SKIPPED (MXN...) [ 10%]
tests/compute/test_index.py::test_dlpack PASSED [ 10%]
tests/compute/test_kernel.py::test_copy_src_reduce PASSED [ 11%]
tests/compute/test_kernel.py::test_copy_edge_reduce PASSED [ 11%]
tests/compute/test_kernel.py::test_all_binary_builtins PASSED [ 7%]
tests/compute/test_heterograph.py::test_empty_query[int32] PASSED [ 7%]
tests/compute/test_heterograph.py::test_empty_query[int64] PASSED [ 7%]
tests/compute/test_heterograph.py::test_adj[int32] PASSED [ 7%]
tests/compute/test_heterograph.py::test_adj[int64] PASSED [ 7%]
tests/compute/test_heterograph.py::test_inc[int32] PASSED [ 7%]
tests/compute/test_heterograph.py::test_inc[int64] PASSED [ 7%]
tests/compute/test_heterograph.py::test_view[int32] PASSED [ 7%]
tests/compute/test_heterograph.py::test_view[int64] PASSED [ 7%]
tests/compute/test_heterograph.py::test_view1[int32]
Client [1090] waits on 172.17.0.3:58207
Machine (0) group (0) client (0) connect to server successfully!
Client[0] in group[0] is exiting...
Done sampling
Server (0) shutdown. Server is exiting...
Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB
Save partitions: 0.001 seconds, peak memory: 1.529 GB
There are 1000 edges in the graph and 0 edge cuts for 1 partition.
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
load test_sampling
start graph service on server 0 for part 0
[05:31:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:31:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:10819]...
[05:31:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:31:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
PASSED [ 7%]
tests/compute/test_heterograph.py::test_view1[int64] PASSED [ 7%]
tests/compute/test_heterograph.py::test_flatten[int32] PASSED [ 7%]
tests/compute/test_heterograph.py::test_flatten[int64]
Client [1090] waits on 172.17.0.3:45951
Machine (0) group (0) client (0) connect to server successfully!
Client[0] in group[0] is exiting...
Server (0) shutdown. Server is exiting...
Done sampling
Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB
Save partitions: 0.001 seconds, peak memory: 1.529 GB
There are 1000 edges in the graph and 0 edge cuts for 1 partition.
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
load test_sampling
start graph service on server 0 for part 0
[05:31:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:31:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:19012]...
[05:31:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:31:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
PASSED [ 7%]
tests/compute/test_heterograph.py::test_to_device[int32] SKIPPED (Ne...) [ 7%]
tests/compute/test_heterograph.py::test_to_device[int64] SKIPPED (Ne...) [ 7%]
tests/compute/test_heterograph.py::test_to_device2[g0-int32] SKIPPED [ 7%]
tests/compute/test_heterograph.py::test_to_device2[g0-int64] SKIPPED [ 7%]
tests/compute/test_heterograph.py::test_to_device2[g1-int32] SKIPPED [ 7%]
tests/compute/test_heterograph.py::test_to_device2[g1-int64] SKIPPED [ 7%]
tests/compute/test_heterograph.py::test_pin_memory_[int32] SKIPPED (...) [ 7%]
tests/compute/test_heterograph.py::test_pin_memory_[int64] SKIPPED (...)
[ 7%] tests/compute/test_heterograph.py::test_convert_bound[int32] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert_bound[int64] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert[int32] PASSED [ 8%] tests/compute/test_heterograph.py::test_convert[int64] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo_zero_nodes[int32] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo_zero_nodes[int64] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo2[int32] PASSED [ 8%] tests/compute/test_heterograph.py::test_to_homo2[int64] PASSED [ 8%] tests/compute/test_heterograph.py::test_invertible_conversion[int32] PASSED [ 8%] tests/compute/test_heterograph.py::test_invertible_conversion[int64] PASSED [ 8%] tests/compute/test_heterograph.py::test_metagraph_reachable[int32] PASSED [ 8%] tests/compute/test_heterograph.py::test_metagraph_reachable[int64] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph_mask[int32] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_subgraph_mask[int64] SKIPPED [ 8%] tests/compute/test_heterograph.py::test_subgraph[int32] PASSED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-int32] PASSED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-int64] PASSED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[int32] PASSED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[int64] PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_id PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_remainder PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_remainder PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_range PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_range PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_support PASSED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[int32] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[int64] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[int32] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[int64] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[int32] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[int64] SKIPPED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[int32] PASSED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[int64] PASSED [ 11%] tests/compute/test_pickle.py::test_pickling_index PASSED [ 11%] tests/compute/test_pickle.py::test_pickling_graph_index PASSED [ 11%] tests/compute/test_pickle.py::test_pickling_graph[g0-int32] SKIPPED [ 11%] tests/compute/test_pickle.py::test_pickling_graph[g0-int64] SKIPPED [ 11%] tests/compute/test_pickle.py::test_pickling_graph[g1-int32] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g1-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-int32] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-int32] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-int32] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g5-int32] SKIPPED [ 12%] 
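About the DGLWarning that recurs throughout this log ("Etype with 'str' format is deprecated. Please use '(str, str, str)'."): DGL is moving edge-type addressing from a bare relation name to the canonical (source node type, relation, destination node type) triple. A minimal sketch of the two spellings, using made-up types in the style of the n1/r12 names seen in the partition output:

    import dgl
    import torch

    g = dgl.heterograph({
        ('n1', 'r12', 'n2'): (torch.tensor([0, 1]), torch.tensor([1, 0])),
    })

    src, dst = g.edges(etype='r12')                 # deprecated bare-string form
    src, dst = g.edges(etype=('n1', 'r12', 'n2'))   # preferred canonical triple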
tests/compute/test_pickle.py::test_pickling_graph[g5-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g6-int32] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g6-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-int32] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-int32] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-int32] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-int32] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g11-int32] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g11-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g12-int32] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g12-int64] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_batched_heterograph SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_subgraph SKIPPED (GPU ed...) [ 13%] tests/compute/test_pickle.py::test_pickling_is_pinned[int32] PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_is_pinned[int64] PASSED [ 13%] tests/compute/test_pin_memory.py::test_pin_unpin PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[int32] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[int64] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[int32] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[int64] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[int32] SKIPPED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[int64] SKIPPED [ 13%] tests/compute/test_random.py::test_random_choice SKIPPED (GPU random...) 
[ 13%] tests/compute/test_readout.py::test_sum_case1[int32] PASSED [ 13%] tests/compute/test_readout.py::test_sum_case1[int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g2-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g2-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g3-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g3-int64] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g4-int32] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g4-int64] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-int32] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-int64] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-int32] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-int32] PASSED 
[ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-int32] PASSED [ 8%] tests/compute/test_heterograph.py::test_subgraph[int64] PASSED [ 8%] tests/compute/test_heterograph.py::test_apply[int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-int32] PASSED [ 16%] 
tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-int64] PASSED [ 16%]
tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-int32] PASSED [ 16%]
tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-int64] PASSED [ 16%]
tests/compute/test_readout.py::test_topk[True-g0-int32] PASSED [ 16%]
tests/compute/test_readout.py::test_topk[True-g0-int64] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g1-int32] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g1-int64] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g2-int32] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g2-int64]
Client [1090] waits on 172.17.0.3:60369
Machine (0) group (0) client (0) connect to server successfully!
Client[0] in group[0] is exiting...
Server (0) shutdown. Done sampling
Server is exiting...
PASSED [ 8%]
tests/compute/test_heterograph.py::test_apply[int64] PASSED [ 8%]
tests/compute/test_heterograph.py::test_level2[int32] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g3-int32] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g3-int64] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g4-int32] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g4-int64] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g5-int32] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g5-int64] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g6-int32] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[True-g6-int64] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[False-g0-int32] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[False-g0-int64] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[False-g1-int32] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[False-g1-int64] PASSED [ 17%]
tests/compute/test_readout.py::test_topk[False-g2-int32]
Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB
Save partitions: 0.001 seconds, peak memory: 1.529 GB
There are 1000 edges in the graph and 0 edge cuts for 1 partition.
PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g2-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-int32] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g4-int32] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g4-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g5-int32] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g5-int64] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-int32] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g0-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g0-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g1-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g1-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g2-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g2-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g3-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g3-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g4-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g4-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g5-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g5-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g6-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g6-int64] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-int32] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-int64] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g1-int32] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g1-int64] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g2-int32] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g2-int64] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g3-int32] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g3-int64] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g4-int32] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g4-int64] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-int32] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-int64] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-int32] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-int64] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[int32] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[int64] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[int32] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[int64] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_edge_removal[int32] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_edge_removal[int64] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[int32] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[int64] PASSED [ 19%] tests/compute/test_removal.py::test_node_and_edge_removal[int32] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. 
Please use '(str, str, str)'.")
load test_sampling
start graph service on server 0 for part 0
[05:32:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:32:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:18024]...
[05:32:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:32:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
PASSED [ 19%]
tests/compute/test_removal.py::test_node_and_edge_removal[int64] PASSED [ 19%]
tests/compute/test_removal.py::test_node_frame[int32] PASSED [ 19%]
tests/compute/test_removal.py::test_node_frame[int64] PASSED [ 19%]
tests/compute/test_removal.py::test_edge_frame[int32] PASSED [ 19%]
tests/compute/test_removal.py::test_edge_frame[int64] PASSED [ 19%]
tests/compute/test_removal.py::test_issue1287[int32] PASSED [ 19%]
tests/compute/test_removal.py::test_issue1287[int64] PASSED [ 19%]
tests/compute/test_sampler.py::test_create_full PASSED [ 19%]
tests/compute/test_sampler.py::test_1neighbor_sampler_all PASSED [ 19%]
tests/compute/test_sampler.py::test_1neighbor_sampler PASSED [ 20%]
tests/compute/test_sampler.py::test_prefetch_neighbor_sampler PASSED [ 20%]
tests/compute/test_sampler.py::test_10neighbor_sampler_all PASSED [ 20%]
tests/compute/test_sampler.py::test_10neighbor_sampler PASSED [ 20%]
tests/compute/test_sampler.py::test_layer_sampler PASSED [ 20%]
tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler PASSED [ 20%]
tests/compute/test_sampler.py::test_setseed PASSED [ 20%]
tests/compute/test_sampler.py::test_negative_sampler PASSED [ 8%]
tests/compute/test_heterograph.py::test_level2[int64]
Client [1090] waits on 172.17.0.3:56707
Machine (0) group (0) client (0) connect to server successfully!
Client[0] in group[0] is exiting...
Server (0) shutdown. Server is exiting...
Done sampling
PASSED [ 23%]
tests/distributed/test_distributed_sampling.py::test_rpc_sampling_shuffle[2]
NumNodes: 2708
NumEdges: 10556
NumFeats: 1433
NumClasses: 7
NumTrainingSamples: 140
NumValidationSamples: 500
NumTestSamples: 1000
Done loading data from cached files.
Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB
Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB
Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB
[05:32:07] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 2 parts and get 260 edge cuts
Metis partitioning: 0.002 seconds, peak memory: 1.529 GB
Assigning nodes to METIS partitions takes 0.003s, peak mem: 1.529 GB
Reshuffle nodes and edges: 0.001 seconds
Split the graph: 0.002 seconds
Construct subgraphs: 0.003 seconds
Splitting the graph into partitions takes 0.006s, peak mem: 1.529 GB
part 0 has 1512 nodes and 1354 are inside the partition
part 0 has 5630 edges and 5370 are inside the partition
part 1 has 1552 nodes and 1354 are inside the partition
part 1 has 5446 edges and 5186 are inside the partition
Save partitions: 0.020 seconds, peak memory: 1.529 GB
There are 10556 edges in the graph and 0 edge cuts for 2 partitions.
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
load test_sampling
start graph service on server 0 for part 0
[05:32:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:32:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:17314]...
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
load test_sampling
start graph service on server 1 for part 1
[05:32:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:32:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:17316]...
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
[05:32:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:32:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
PASSED [ 8%]
tests/compute/test_heterograph.py::test_more_nnz[int32] SKIPPED (Nee...) [ 8%]
tests/compute/test_heterograph.py::test_more_nnz[int64] SKIPPED (Nee...) [ 8%]
tests/compute/test_heterograph.py::test_updates[int32] PASSED [ 8%]
tests/compute/test_heterograph.py::test_updates[int64]
Client [1509] waits on 172.17.0.3:59519
Machine (0) group (0) client (0) connect to server successfully!
Client[0] in group[0] is exiting...
Server (0) shutdown. Server is exiting...
PASSED [ 8%]
tests/compute/test_heterograph.py::test_backward[int32] PASSED [ 9%]
tests/compute/test_heterograph.py::test_backward[int64] PASSED [ 9%]
tests/compute/test_heterograph.py::test_empty_heterograph[int32] PASSED [ 9%]
tests/compute/test_heterograph.py::test_empty_heterograph[int64] PASSED [ 9%]
tests/compute/test_heterograph.py::test_types_in_function[int32]
Server (1) shutdown. Server is exiting...
PASSED [ 20%] tests/compute/test_sampling.py::test_non_uniform_random_walk[True] SKIPPED [ 20%] tests/compute/test_sampling.py::test_non_uniform_random_walk[False] PASSED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[True] SKIPPED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[False] PASSED [ 20%] tests/compute/test_sampling.py::test_node2vec PASSED [ 20%] tests/compute/test_sampling.py::test_pack_traces PASSED [ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[True] SKIPPED [ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[False] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_noprob PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_prob PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_outedge Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:32:14] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 3030 nodes and 6120 edges into 2 parts and get 204 edge cuts Metis partitioning: 0.003 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.002 seconds Splitting the graph into partitions takes 0.005s, peak mem: 1.529 GB part 0 has 587 nodes of type n1 and 519 are inside the partition part 0 has 574 nodes of type n2 and 511 are inside the partition part 0 has 585 nodes of type n3 and 525 are inside the partition part 0 has 565 edges of type r12 and 531 are inside the partition part 0 has 571 edges of type r13 and 535 are inside the partition part 0 has 554 edges of type r23 and 527 are inside the partition part 1 has 556 nodes of type n1 and 491 are inside the partition part 1 has 549 nodes of type n2 and 489 are inside the partition part 1 has 559 nodes of type n3 and 495 are inside the partition part 1 has 515 edges of type r12 and 479 are inside the partition part 1 has 530 edges of type r13 and 495 are inside the partition part 1 has 529 edges of type r23 and 493 are inside the partition Save partitions: 0.005 seconds, peak memory: 1.529 GB There are 3060 edges in the graph and 0 edge cuts for 2 partitions. PASSED [ 9%] tests/compute/test_heterograph.py::test_types_in_function[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_stack_reduce[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[int32] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:32:15] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:15] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:14572]... 
PASSED [ 9%] tests/compute/test_heterograph.py::test_isolated_ntype[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_ismultigraph[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_graph_index_is_unibipartite[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_bipartite[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_dtype_cast[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_dtype_cast[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_float_cast /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:32:16] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:16] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:14574]... [05:32:17] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:17] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 9%] tests/compute/test_heterograph.py::test_format[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_format[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_edges_order[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_edges_order[int64] PASSED [ 9%] tests/compute/test_heterograph.py::test_reverse[int32] PASSED [ 9%] tests/compute/test_heterograph.py::test_reverse[int64] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk PASSED [ 9%] tests/compute/test_heterograph.py::test_clone[int32] PASSED [ 10%] tests/compute/test_heterograph.py::test_clone[int64] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_edges[int32] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk_outedge PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_with_0deg PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_biased_homogeneous PASSED [ 10%] tests/compute/test_heterograph.py::test_add_edges[int64] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_biased_bipartite PASSED [ 10%] tests/compute/test_heterograph.py::test_add_nodes[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-coo] PASSED [ 10%] tests/compute/test_heterograph.py::test_add_nodes[int64] PASSED [ 10%] tests/compute/test_heterograph.py::test_remove_edges[int32] SKIPPED [ 10%] tests/compute/test_heterograph.py::test_remove_edges[int64] SKIPPED [ 10%] tests/compute/test_heterograph.py::test_remove_nodes[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csr] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csc] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-coo] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csr] PASSED [ 21%] 
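The tests/compute/test_sampling.py block above covers dgl.sampling.sample_neighbors and its prob/topk/edge-direction variants. A minimal call with a toy graph (illustrative only, not the test fixture):

    import dgl
    import torch

    g = dgl.graph((torch.tensor([0, 1, 2, 3]), torch.tensor([1, 2, 3, 0])))
    # Sample at most 2 in-edges per seed node; edge_dir='out' would give
    # the *_outedge behavior seen in the listing above.
    sg = dgl.sampling.sample_neighbors(g, torch.tensor([1, 2]), 2)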
tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csc] PASSED [ 10%]
tests/compute/test_heterograph.py::test_remove_nodes[int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_frame[int32] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-coo] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csr] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csc] PASSED [ 10%]
tests/compute/test_heterograph.py::test_frame[int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_frame_device[int32] SKIPPED [ 10%]
tests/compute/test_heterograph.py::test_frame_device[int64] SKIPPED [ 10%]
tests/compute/test_heterograph.py::test_create_block[int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_create_block[int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[coo-int32] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-coo] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csr] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csc] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csr] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csc] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csr] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csc] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[coo-int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[csr-int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[csr-int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[csc-int32] PASSED [ 10%]
tests/compute/test_heterograph.py::test_adj_sparse[csc-int64] PASSED [ 10%]
tests/compute/test_heterograph.py::test_forking_pickler SKIPPED (MXN...) [ 10%]
tests/compute/test_index.py::test_dlpack PASSED [ 10%]
tests/compute/test_kernel.py::test_copy_src_reduce PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int64] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int32] PASSED [ 21%]
tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int64] PASSED [ 21%]
tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32] PASSED [ 21%]
tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64] PASSED [ 22%]
tests/compute/test_serialize.py::test_graph_serialize_with_feature[True] PASSED [ 22%]
tests/compute/test_serialize.py::test_graph_serialize_with_feature[False]
Client [1090] waits on 172.17.0.3:39975
Machine (0) group (0) client (0) connect to server successfully!
Client[0] in group[0] is exiting...
PASSED [ 22%]
tests/compute/test_serialize.py::test_graph_serialize_without_feature[True] PASSED [ 22%]
tests/compute/test_serialize.py::test_graph_serialize_without_feature[False] PASSED [ 22%]
tests/compute/test_serialize.py::test_graph_serialize_with_labels[True] PASSED [ 22%]
tests/compute/test_serialize.py::test_graph_serialize_with_labels[False] PASSED [ 22%]
tests/compute/test_serialize.py::test_serialize_tensors PASSED [ 22%]
tests/compute/test_serialize.py::test_serialize_empty_dict PASSED [ 22%]
tests/compute/test_serialize.py::test_load_old_files1 PASSED [ 22%]
tests/compute/test_serialize.py::test_load_old_files2 PASSED [ 22%]
tests/compute/test_serialize.py::test_deserialize_old_heterograph_file PASSED [ 22%]
tests/compute/test_serialize.py::test_serialize_heterograph PASSED [ 22%]
tests/compute/test_serialize.py::test_serialize_heterograph_s3 SKIPPED [ 22%]
tests/compute/test_shared_mem.py::test_single_process[idtype0] PASSED [ 22%]
tests/compute/test_shared_mem.py::test_single_process[idtype1] PASSED [ 22%]
tests/compute/test_shared_mem.py::test_multi_process[idtype0] PASSED [ 22%]
tests/compute/test_shared_mem.py::test_multi_process[idtype1] PASSED [ 22%]
tests/compute/test_shared_mem.py::test_copy_from_gpu SKIPPED (Need g...) [ 22%]
tests/compute/test_sort.py::test_sort_with_tag[idtype0] PASSED [ 22%]
tests/compute/test_sort.py::test_sort_with_tag[idtype1]
Server (1) shutdown. Server (0) shutdown.
Server is exiting...
Server is exiting...
Done sampling
PASSED [ 22%]
tests/compute/test_sort.py::test_sort_with_tag_bipartite[idtype0] PASSED [ 22%]
tests/compute/test_sort.py::test_sort_with_tag_bipartite[idtype1] PASSED [ 22%]
tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g0]
Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB
Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.529 GB
Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB
[05:32:21] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 3030 nodes and 6000 edges into 2 parts and get 199 edge cuts
Metis partitioning: 0.003 seconds, peak memory: 1.529 GB
Assigning nodes to METIS partitions takes 0.003s, peak mem: 1.529 GB
Reshuffle nodes and edges: 0.001 seconds
Split the graph: 0.002 seconds
Construct subgraphs: 0.002 seconds
Splitting the graph into partitions takes 0.005s, peak mem: 1.529 GB
part 0 has 555 nodes of type n1 and 493 are inside the partition
part 0 has 550 nodes of type n2 and 486 are inside the partition
part 0 has 556 nodes of type n3 and 496 are inside the partition
part 0 has 508 edges of type r12 and 473 are inside the partition
part 0 has 517 edges of type r13 and 486 are inside the partition
part 0 has 494 edges of type r23 and 462 are inside the partition
part 1 has 577 nodes of type n1 and 517 are inside the partition
part 1 has 567 nodes of type n2 and 514 are inside the partition
part 1 has 594 nodes of type n3 and 524 are inside the partition
part 1 has 541 edges of type r12 and 517 are inside the partition
part 1 has 565 edges of type r13 and 524 are inside the partition
part 1 has 574 edges of type r23 and 538 are inside the partition
Save partitions: 0.002 seconds, peak memory: 1.529 GB
There are 3000 edges in the graph and 0 edge cuts for 2 partitions.
FAILED [ 22%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g1] FAILED [ 22%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g0] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g0] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g0] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp4-g0] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp4-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp5-g0] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp5-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp0-g0] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp0-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp1-g0] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp1-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp2-g0] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp2-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g0] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp4-g0] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp4-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp5-g0] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:32:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:19012]... 
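The DGLWarning repeated in these logs comes from dgl/distributed/graph_partition_book.py: call sites are passing an edge type as a bare string, while the distributed API now wants the canonical (src_type, edge_type, dst_type) triple. A hedged illustration of the two spellings, on an invented heterograph:

```python
import torch
import dgl

g = dgl.heterograph({
    ('n1', 'r12', 'n2'): (torch.tensor([0, 1]), torch.tensor([1, 0])),
    ('n2', 'r23', 'n3'): (torch.tensor([0, 1]), torch.tensor([0, 1])),
})

src, dst = g.edges(etype='r12')                # bare 'str' form (the deprecated spelling)
src, dst = g.edges(etype=('n1', 'r12', 'n2'))  # canonical triple form
```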
FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp5-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp0-g0] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp0-g1] FAILED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp1-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp1-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp2-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp2-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp3-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp3-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp4-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp4-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp5-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp5-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp0-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp0-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp1-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp1-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp2-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp2-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g0] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:32:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:19014]... [05:32:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
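The "start graph service on server N for part N", "Sender/Receiver with NetType~socket" and "Server is waiting for connections" lines come from the distributed sampling tests, which launch DGL graph servers and clients as separate processes. A hedged sketch of the trainer-side calls behind such logs; the ip_config path is invented and the servers must already be running:

```python
import dgl

# Join the RPC cluster described by the ip_config file, then attach to the
# partitioned graph ('test_sampling' in the logs) served by the graph servers.
dgl.distributed.initialize('ip_config.txt')
g = dgl.distributed.DistGraph('test_sampling')
print(g.num_nodes())
```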
FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp4-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp4-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp5-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp5-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp0-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp0-g1] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp1-g0] FAILED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp1-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp2-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp2-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp3-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp3-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp4-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp4-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp5-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp5-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp0-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp0-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp1-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp1-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp2-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp2-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp3-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp3-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp4-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp4-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp5-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp5-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp0-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp0-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp1-g0] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp1-g1] FAILED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp2-g0] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp2-g1] PASSED [ 11%] tests/compute/test_kernel.py::test_copy_edge_reduce FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp3-g0] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp3-g1] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp4-g0] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp4-g1] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp5-g0] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp5-g1] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp0-g0] FAILED [ 26%] 
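Each test_spmm id packs an index dtype, a reducer, a message op, a feature shape, and a graph: [idtype0-sum-mul-shp2-g0], for example, pairs the 'mul' message op with the 'sum' reducer. A hedged sketch of the message-passing pattern those names describe, on an invented graph:

```python
import torch
import dgl
import dgl.function as fn

g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
g.ndata['u'] = torch.randn(3, 5)   # source-node features (one of the 'shp' cases)
g.edata['e'] = torch.randn(3, 5)   # edge features

# 'mul' message op + 'sum' reducer, i.e. the [...-sum-mul-...] combination:
# multiply each source feature by its edge feature, then sum per destination.
g.update_all(fn.u_mul_e('u', 'e', 'm'), fn.sum('m', 'h'))
print(g.ndata['h'].shape)          # torch.Size([3, 5])
```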
tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp0-g1] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp1-g0] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp1-g1] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp2-g0] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp2-g1] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp3-g0] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp3-g1] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp4-g0] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp4-g1] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp5-g0] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp5-g1] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp0-g0] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp0-g1] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g0] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g1] FAILED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp2-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp2-g1] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g1] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp4-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp4-g1] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp5-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp5-g1] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp0-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp0-g1] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp1-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp1-g1] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp2-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp2-g1] Client [1090] waits on 172.17.0.3:56167 Machine (0) group (0) client (0) connect to server successfully! Client[0] in group[0] is exiting... Server (1) shutdown. Done sampling Server is exiting... Server (0) shutdown. Server is exiting...
Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:32:26] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 630 nodes and 26440 edges into 2 parts and get 5478 edge cuts Metis partitioning: 0.005 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.006s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.001 seconds Construct subgraphs: 0.003 seconds Splitting the graph into partitions takes 0.006s, peak mem: 1.529 GB part 0 has 210 nodes of type n1 and 108 are inside the partition part 0 has 200 nodes of type n2 and 100 are inside the partition part 0 has 220 nodes of type n3 and 113 are inside the partition part 0 has 2989 edges of type r12 and 2109 are inside the partition part 0 has 3328 edges of type r13 and 2342 are inside the partition part 0 has 3166 edges of type r23 and 2217 are inside the partition part 1 has 210 nodes of type n1 and 102 are inside the partition part 1 has 200 nodes of type n2 and 100 are inside the partition part 1 has 220 nodes of type n3 and 107 are inside the partition part 1 has 2942 edges of type r12 and 2091 are inside the partition part 1 has 3191 edges of type r13 and 2278 are inside the partition part 1 has 3082 edges of type r23 and 2183 are inside the partition Save partitions: 0.003 seconds, peak memory: 1.529 GB There are 13220 edges in the graph and 0 edge cuts for 2 partitions. FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp3-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp3-g1] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp4-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp4-g1] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp5-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp5-g1] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp0-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp0-g1] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp1-g0] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp1-g1] FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp2-g0] PASSED [ 20%] tests/compute/test_sampling.py::test_non_uniform_random_walk[True] SKIPPED [ 20%] tests/compute/test_sampling.py::test_non_uniform_random_walk[False] PASSED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[True] PASSED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[False] PASSED [ 20%] tests/compute/test_sampling.py::test_node2vec SKIPPED (GPU random wa...) [ 20%] tests/compute/test_sampling.py::test_pack_traces SKIPPED (GPU pack t...) 
[ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[True] PASSED [ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[False] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_noprob FAILED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp2-g1] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp3-g0] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp3-g1] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp4-g0] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp4-g1] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp5-g0] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_prob FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp5-g1] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp0-g0] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp0-g1] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp1-g0] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp1-g1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:32:27] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:27] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18024]... PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_outedge FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp2-g0] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp2-g1] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp3-g0] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp3-g1] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp4-g0] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp4-g1] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp5-g0] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp5-g1] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp0-g0] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp0-g1] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp1-g0] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk SKIPPED (...) 
[ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk_outedge SKIPPED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_with_0deg PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_biased_homogeneous SKIPPED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_biased_bipartite SKIPPED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-coo] SKIPPED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-coo] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-coo] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-coo] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csr] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csc] SKIPPED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int64] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int64] PASSED [ 21%] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp1-g1] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp2-g0] FAILED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp2-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp3-g0] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp3-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp4-g0] PASSED [ 21%] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64] PASSED [ 21%] tests/compute/test_serialize.py::test_graph_serialize_with_feature[True] SKIPPED [ 21%] tests/compute/test_serialize.py::test_graph_serialize_with_feature[False] SKIPPED [ 21%] tests/compute/test_serialize.py::test_graph_serialize_without_feature[True] SKIPPED [ 21%] tests/compute/test_serialize.py::test_graph_serialize_without_feature[False] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_labels[True] SKIPPED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_labels[False] SKIPPED [ 22%] 
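A hedged sketch of the two sampling APIs named in this stretch of the log, test_sample_neighbors_* and test_global_uniform_negative_sampling; the graph, seeds and fanout are invented:

```python
import torch
import dgl

g = dgl.graph((torch.tensor([0, 1, 2, 3]), torch.tensor([1, 2, 3, 0])))

# Sample up to 2 in-edges per seed node; edge_dir='out' matches the [out-*] cases.
sg = dgl.sampling.sample_neighbors(g, torch.tensor([0, 1]), fanout=2,
                                   edge_dir='in')

# Draw up to 4 (src, dst) pairs uniformly from node pairs that are NOT edges of g.
neg_src, neg_dst = dgl.sampling.global_uniform_negative_sampling(g, 4)
```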
tests/compute/test_serialize.py::test_serialize_tensors PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_empty_dict PASSED [ 22%] tests/compute/test_serialize.py::test_load_old_files1 PASSED [ 22%] tests/compute/test_serialize.py::test_load_old_files2 PASSED [ 22%] tests/compute/test_serialize.py::test_deserialize_old_heterograph_file PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_heterograph SKIPPED [ 22%] tests/compute/test_serialize.py::test_serialize_heterograph_s3 SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_single_process[int32] PASSED [ 22%] tests/compute/test_shared_mem.py::test_single_process[int64] PASSED [ 22%] tests/compute/test_shared_mem.py::test_multi_process[int32] PASSED [ 22%] tests/compute/test_shared_mem.py::test_multi_process[int64] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp4-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp5-g0] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp5-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp0-g0] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp0-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp1-g0] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp1-g1] PASSED [ 22%] tests/compute/test_shared_mem.py::test_copy_from_gpu PASSED [ 22%] tests/compute/test_sort.py::test_sort_with_tag[int32] SKIPPED (GPU s...) [ 22%] tests/compute/test_sort.py::test_sort_with_tag[int64] SKIPPED (GPU s...) [ 22%] tests/compute/test_sort.py::test_sort_with_tag_bipartite[int32] SKIPPED [ 22%] tests/compute/test_sort.py::test_sort_with_tag_bipartite[int64] SKIPPED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp0-g0] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp0-g1] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp1-g0] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp1-g1] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp2-g0] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp2-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp2-g0] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp2-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp3-g0] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp3-g1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:32:28] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:28] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18026]... [05:32:28] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:28] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
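The test_shared_mem cases above cover sharing one graph structure across processes. A hedged sketch, assuming DGL's shared-memory helpers that these tests appear to exercise (the segment name 'demo_g' is invented):

```python
import torch
import dgl

g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])))

# Materialize the graph structure in a named shared-memory segment.
shared_g = g.shared_memory('demo_g')

# In another process, attach to the same structure by name.
g2 = dgl.hetero_from_shared_memory('demo_g')
assert g2.num_nodes() == g.num_nodes()
```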
PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp3-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp3-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp4-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp4-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp5-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp5-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp0-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp0-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp1-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp1-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp2-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp2-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp3-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp3-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp4-g0] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp4-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp5-g0] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp5-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp0-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp4-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp4-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp5-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp5-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp0-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp0-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp1-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp1-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp2-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp2-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp3-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp3-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp4-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp4-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp0-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp1-g0] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp1-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp2-g0] PASSED [ 11%] tests/compute/test_kernel.py::test_all_binary_builtins PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp5-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp5-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp0-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp0-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp1-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp2-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp2-g1] 
PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp3-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp3-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp4-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp4-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp5-g0] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp2-g1] FAILED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp3-g0] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp3-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp4-g0] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp4-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp5-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp5-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp0-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp0-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp1-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp2-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp2-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp3-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp3-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp4-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp4-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp5-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp5-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp0-g0] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp5-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp0-g0] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp0-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp1-g0] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp0-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp1-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp2-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp2-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp3-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp3-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp4-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp4-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp5-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp5-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp0-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp0-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp1-g0] FAILED [ 30%] 
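The same message-op/reducer combinations are also exposed as fused functions in dgl.ops, a thin functional wrapper over the kernels that test_spmm stresses. A hedged sketch with an invented graph:

```python
import torch
import dgl
import dgl.ops

g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
u = torch.randn(3, 5)              # source-node features
e = torch.randn(3, 5)              # edge features

# Fused u*e message + sum reduction; equivalent to the update_all form above.
h = dgl.ops.u_mul_e_sum(g, u, e)
```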
tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp2-g0] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp2-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp3-g0] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp3-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp4-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp2-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp2-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp3-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp3-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp4-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp4-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp5-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp5-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp0-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp0-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp1-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp1-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp4-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp5-g0] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp5-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp0-g0] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp0-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp2-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp3-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp3-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp4-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp4-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp5-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp5-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp0-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp0-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp1-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp1-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp1-g0] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp1-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp2-g0] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp2-g1] FAILED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp3-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp2-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp3-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp3-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp4-g0] PASSED [ 27%] 
tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp4-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp5-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp5-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp0-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp1-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp1-g1] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp3-g1] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp4-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp4-g1] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp5-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp5-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp2-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp2-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp3-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp3-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp4-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp4-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp5-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp5-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp0-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp1-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp1-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp2-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp0-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp0-g1] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp2-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp3-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp3-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp4-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp5-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp5-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp0-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp0-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp1-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp1-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp2-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp2-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp3-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp2-g0] FAILED [ 31%] 
tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp2-g1] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp3-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp3-g1] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp3-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp4-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp5-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp5-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp0-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp0-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp1-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp1-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp2-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp2-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp3-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp3-g1] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp4-g1] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp5-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp5-g1] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp0-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp0-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp4-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp5-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp5-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp0-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp0-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp1-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp1-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp2-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp2-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp3-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp3-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp4-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp1-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp1-g1] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp2-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp2-g1] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp3-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp4-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp5-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp5-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp0-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp0-g1] PASSED [ 29%] 
tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp1-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp1-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp2-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp2-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp3-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp3-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp4-g0] FAILED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp3-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp4-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp4-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp5-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp5-g1] Client [1090] waits on 172.17.0.3:45547 Machine (0) group (0) client (0) connect to server successfully! Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Done sampling Server (1) shutdown. Server is exiting... Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:32:32] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 630 nodes and 26440 edges into 2 parts and get 5478 edge cuts Metis partitioning: 0.005 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.006s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.001 seconds Construct subgraphs: 0.003 seconds Splitting the graph into partitions takes 0.005s, peak mem: 1.529 GB part 0 has 210 nodes of type n1 and 108 are inside the partition part 0 has 200 nodes of type n2 and 100 are inside the partition part 0 has 220 nodes of type n3 and 113 are inside the partition part 0 has 2989 edges of type r12 and 2109 are inside the partition part 0 has 3328 edges of type r13 and 2342 are inside the partition part 0 has 3166 edges of type r23 and 2217 are inside the partition part 1 has 210 nodes of type n1 and 102 are inside the partition part 1 has 200 nodes of type n2 and 100 are inside the partition part 1 has 220 nodes of type n3 and 107 are inside the partition part 1 has 2942 edges of type r12 and 2091 are inside the partition part 1 has 3191 edges of type r13 and 2278 are inside the partition part 1 has 3082 edges of type r23 and 2183 are inside the partition Save partitions: 0.003 seconds, peak memory: 1.529 GB There are 13220 edges in the graph and 0 edge cuts for 2 partitions.
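The partitioning log above ("Converting to homogeneous graph ...", "Metis partitioning: ...", the per-part node and edge counts) is emitted by DGL's partition pipeline. A hedged sketch of the call that produces it; the n1/n2/n3 and r12/r13/r23 schema mirrors the log, but the edges are random and the output path is invented:

```python
import torch
import dgl

def rand_edges(n_src, n_dst, n_edges):
    # Random (src, dst) pairs for one relation; purely illustrative.
    return torch.randint(n_src, (n_edges,)), torch.randint(n_dst, (n_edges,))

g = dgl.heterograph({
    ('n1', 'r12', 'n2'): rand_edges(420, 400, 5900),
    ('n1', 'r13', 'n3'): rand_edges(420, 440, 6500),
    ('n2', 'r23', 'n3'): rand_edges(400, 440, 6200),
})

# Split into 2 METIS parts and write part0/, part1/ plus the partition-book
# JSON under /tmp/parts; this is the call that logs the lines above.
dgl.distributed.partition_graph(g, 'test_sampling', 2, '/tmp/parts',
                                part_method='metis')
```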
PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp4-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp5-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp5-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp0-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp0-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp1-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp1-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp2-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp2-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp3-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp3-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp4-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp0-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp0-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp1-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp1-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp2-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp4-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp5-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp5-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp0-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp0-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp1-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp1-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp2-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp2-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp3-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp3-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp4-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp4-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp2-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp3-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp3-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp4-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp4-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp5-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp5-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp0-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp0-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp1-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp1-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp2-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp2-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp3-g0] PASSED [ 31%] 
tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp3-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp4-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp4-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp5-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp5-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp5-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp5-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp0-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp0-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp0-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp0-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp1-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp1-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp2-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp2-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp3-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp3-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp4-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp4-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp5-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp5-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp0-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp1-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp1-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp2-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp2-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp3-g0] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:32:33] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:33] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18024]... 
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp0-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp1-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp1-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp2-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp2-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp3-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp3-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp4-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp4-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp5-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp5-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp0-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp0-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp1-g0] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp3-g1] FAILED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp4-g0] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp4-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp5-g0] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp5-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp1-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp2-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp2-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp3-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp3-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp4-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp4-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp5-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp5-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp0-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp0-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp1-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp1-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp0-g0] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp0-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp1-g0] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp1-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp2-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp2-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp2-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp3-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp3-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp4-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp4-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp5-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp5-g1] PASSED [ 33%] 
tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp0-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp0-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp1-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp1-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp2-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp2-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp2-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp3-g0] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp3-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp4-g0] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp4-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp3-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp3-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp4-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp4-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp5-g0] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp5-g1] PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp0-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp0-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp1-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp1-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp2-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp2-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp3-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp3-g1] [05:32:34] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:34] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp5-g0] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp5-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp0-g0] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp0-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp1-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp4-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp4-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp5-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp5-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp0-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp0-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp1-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp1-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp2-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp2-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp3-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp3-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp4-g0] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:32:34] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:34] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18026]... 
FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp1-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp2-g0] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp2-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp3-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp4-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp5-g0] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp5-g1] PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp0-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp0-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp1-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp1-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp2-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp2-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp3-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp3-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp4-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp4-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp3-g1] FAILED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp4-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp4-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp5-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp5-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp0-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp5-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp5-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp0-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp0-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp1-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp1-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp2-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp2-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp3-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp3-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp4-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp4-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp0-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp1-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp1-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp2-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp5-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp5-g1] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp0-g0] PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp0-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp1-g0] PASSED [ 36%] 
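The interleaved "load test_sampling", "start graph service on server 1 for part 1", and "Server is waiting for connections on [172.17.0.3:18026]..." messages come from the distributed sampling test running alongside this shard: each graph server loads its partition and blocks until the trainer processes connect. A client-side sketch of that handshake, assuming a graph partitioned under the name test_sampling and a hypothetical ip_config.txt listing the servers:

import torch
import dgl.distributed as dist

# Join the running servers listed in ip_config.txt (one "ip port" line
# per server; the file name here is hypothetical).
dist.initialize('ip_config.txt')

# Attach to the graph under the name it was partitioned with.
g = dist.DistGraph('test_sampling')

# Request a 1-hop neighbor sample through the servers, as the test does.
seeds = torch.tensor([0, 1, 2])
frontier = dist.sample_neighbors(g, seeds, fanout=3)
print(frontier.num_edges())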
tests/compute/test_sparse.py::test_spmm[int64-min-div-shp1-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp2-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp2-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp3-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp3-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp4-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp4-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp2-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp3-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp3-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp4-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp4-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp5-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp5-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp0-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp0-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp1-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp1-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp2-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp2-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp3-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp3-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp4-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp4-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp5-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp5-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp5-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp0-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp0-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp1-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp5-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp0-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp0-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp1-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp1-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp2-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp3-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp3-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp4-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp4-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp5-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp5-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp0-g0] 
FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp1-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp2-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp2-g1] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp3-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp3-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp0-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp1-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp1-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp2-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp3-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp3-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp4-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp4-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp5-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp5-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp0-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp4-g0] FAILED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp4-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp5-g0] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp5-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp0-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp0-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp1-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp1-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp2-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp2-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp3-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp3-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp4-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp4-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp5-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp5-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp0-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp0-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp0-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp1-g0] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp1-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp2-g0] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp2-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp1-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp1-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp2-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp2-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp3-g0] PASSED [ 38%] 
tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp3-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp4-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp4-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp5-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp5-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp0-g0] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp3-g0] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp3-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp4-g0] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp4-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp5-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp0-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp1-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp1-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp2-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp2-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp3-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp3-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp4-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp4-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp5-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp5-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp0-g0] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp5-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp0-g0] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp0-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp1-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp0-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp1-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp1-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp2-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp2-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp3-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp3-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp4-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp4-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp5-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp5-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp0-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp0-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp1-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp2-g0] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp2-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp3-g0] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp3-g1] 
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp1-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp1-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp2-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp2-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp3-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp3-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp4-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp4-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp5-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp5-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp0-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp1-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp1-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp2-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp2-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp3-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp3-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp4-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp4-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp0-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp1-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp1-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp2-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp3-g1] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp4-g0] FAILED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp4-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp5-g0] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp5-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp0-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp4-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp0-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp1-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp1-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp3-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp4-g1] 
PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp0-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp1-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp1-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp3-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp4-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp2-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp2-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp3-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp2-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp2-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp3-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp1-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp0-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp1-g0] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp1-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp2-g0] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp2-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp3-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp4-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp4-g1] PASSED [ 43%] 
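The test_sddmm ids extend the same grid with two operand targets, u, v, or e: which side of each edge the lhs and rhs data are gathered from (source node, destination node, or the edge itself). A minimal sketch of the generalized SDDMM under test, assuming dgl.ops.gsddmm, again on an illustrative graph:

import torch
import dgl
from dgl import ops

# Stand-in graph for the g0/g1 fixtures, using the int32 index dtype
# this shard is parametrized with.
g = dgl.graph(([0, 1, 2], [1, 2, 0]), idtype=torch.int32)

x = torch.randn(g.num_nodes(), 4)
y = torch.randn(g.num_nodes(), 4)

# g-SDDMM: for every edge, combine lhs data gathered from its source
# ('u') with rhs data gathered from its destination ('v'), here with
# 'dot', one of the ops (add/sub/mul/div/dot/copy_lhs/copy_rhs) in the grid.
e = ops.gsddmm(g, 'dot', x, y, lhs_target='u', rhs_target='v')
print(e.shape)  # (num_edges, 1): 'dot' contracts the feature dim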
tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp3-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp4-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp4-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp3-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp4-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp1-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp2-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp1-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp2-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp2-g0] PASSED [ 45%] 
tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp2-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp3-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp3-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp4-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp4-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp3-g0] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp3-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp4-g0] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp4-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp5-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp0-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp1-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp2-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp3-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp3-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp4-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp4-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp0-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp1-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp2-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp3-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp3-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp0-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp0-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp1-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp1-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp2-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp3-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp0-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp0-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp1-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp1-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp2-g0] PASSED [ 46%] 
tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp3-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp0-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp0-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp2-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp2-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp5-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp0-g0] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp0-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp1-g0] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp3-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp3-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp4-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp4-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp2-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp2-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp3-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp3-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp4-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp4-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp2-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp2-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp3-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp3-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp4-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp0-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp0-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp1-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp1-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp2-g0] PASSED [ 48%] 
tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp2-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp3-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp3-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp4-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp0-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp0-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp1-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp1-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp2-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp2-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp3-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp3-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp4-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp0-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp0-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp1-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp2-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp2-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp3-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp3-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp4-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp4-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp0-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp0-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp1-g0] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp2-g0] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp2-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp3-g0] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp3-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp4-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp1-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp2-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp2-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp3-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp3-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp4-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp4-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp0-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp0-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp1-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp2-g0] PASSED [ 49%] 
tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp2-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp3-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp4-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp4-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp0-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp0-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp1-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp1-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp2-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp2-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp3-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp4-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp4-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp0-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp0-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp1-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp1-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp2-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp2-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp3-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp4-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp4-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp0-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp1-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp2-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp2-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp3-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp3-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp4-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp4-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp0-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp1-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp2-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp2-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp3-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp3-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp4-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp4-g1] PASSED [ 51%] 
tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp0-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp1-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp4-g1] FAILED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp5-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp5-g1] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp0-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp2-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp2-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp3-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp4-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp4-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp0-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp0-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp1-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp1-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp2-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp2-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp3-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp4-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp4-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp0-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp0-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp1-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp1-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp2-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp2-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp3-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp4-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp4-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp0-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp1-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp2-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp2-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp3-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp3-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp4-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp4-g1] PASSED [ 53%] 
tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp0-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp1-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp2-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp2-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp3-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp3-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp4-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp1-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp1-g1] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp2-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp2-g1] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp3-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp4-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp0-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp1-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp3-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp3-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp4-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp4-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp0-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp0-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp1-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp1-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp3-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp3-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp4-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp4-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp0-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp0-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp1-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp1-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp3-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp3-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp4-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp4-g1] PASSED [ 55%] 
tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp0-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp1-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp1-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp2-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp2-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp3-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp3-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp0-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp1-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp1-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp2-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp2-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp3-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp3-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp0-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp1-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp3-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp3-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp4-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp4-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp0-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp0-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp1-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp3-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp3-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp4-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp4-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp0-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp0-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp1-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp2-g0] PASSED [ 56%] 
tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp3-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp3-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp0-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp0-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp1-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp1-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp2-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp2-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp3-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp3-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp0-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp0-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp1-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp1-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp2-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp2-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp3-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp3-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp0-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp0-g1] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp3-g1] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp4-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp4-g1] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp5-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp5-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp2-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp3-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp3-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp4-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp4-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp0-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp2-g0] PASSED [ 58%] 
tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp2-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp3-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp3-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp4-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp4-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp0-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp2-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp3-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp4-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp0-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp0-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp1-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp1-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp2-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp4-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp0-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp0-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp1-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp1-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp2-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp4-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp0-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp2-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp2-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp3-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp3-g1] PASSED [ 60%] 
tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp4-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp4-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp0-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp2-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp2-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp3-g0] PASSED [ 60%]
FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp0-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp0-g1] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp1-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp1-g1] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp2-g0]
tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp3-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp4-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp4-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp0-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp2-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp2-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp4-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp4-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp0-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp0-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp1-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp1-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp2-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp2-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp4-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp4-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp0-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp0-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp1-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp1-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp2-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp2-g1] PASSED [ 61%]
tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp4-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp4-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp2-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp2-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp3-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp3-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp2-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp2-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp3-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp3-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp2-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp3-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp3-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp4-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp0-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp2-g1] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp3-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp3-g1] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp4-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp0-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp1-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp1-g1] PASSED [ 63%] 
tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp2-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp3-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp3-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp4-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp0-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp0-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp1-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp2-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp3-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp3-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp4-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp2-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp3-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp2-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp3-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp3-g0] 
PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp3-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp4-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp4-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp0-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp0-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp1-g1] PASSED [ 65%]FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp5-g0] FAILED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp5-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp0-g0] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp0-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp1-g0] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp3-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp4-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp4-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp0-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp0-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp3-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp0-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp1-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp1-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp2-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp3-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp0-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp1-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp1-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp2-g1] 
PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp3-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp3-g1] PASSED [ 66%]FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp1-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp2-g0] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp2-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp3-g0] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp3-g1] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp0-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp1-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp4-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp4-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp0-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp4-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp4-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp0-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp0-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp1-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp1-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp2-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp2-g1] PASSED [ 68%] 
tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp0-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp1-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp1-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp2-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp2-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp4-g0] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp4-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp5-g0] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp5-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp0-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp2-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp3-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp3-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp4-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp4-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-e-shp0-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-e-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-e-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-e-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-e-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-e-shp2-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-e-shp3-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-e-shp3-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-e-shp4-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-e-shp4-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-u-shp0-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-u-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-u-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-u-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-u-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-u-shp2-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-u-shp3-g0] PASSED [ 69%] 
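(The FAILED records interleaved above all come from test_spmm with the max reducer under idtype1, paired with each message op in turn. A minimal sketch of the generalized SpMM those ids describe, assuming dgl.ops.gspmm and a toy graph; this illustrates the operation only and says nothing about why these cases fail:

import torch
import dgl
import dgl.ops as ops

# idtype1 presumably maps to int64 ids (assumption, based on the int32/int64 sddmm ids).
g = dgl.graph(([0, 1, 2], [1, 2, 0]), idtype=torch.int64)

u_feat = torch.randn(g.num_nodes(), 8)  # source-node operand
e_feat = torch.randn(g.num_edges(), 8)  # edge operand

# gspmm builds a message per edge, here op(u_feat[u], e_feat[e]) with op='add',
# then reduces the incoming messages at each destination node with reduce='max' --
# the combination a parametrization such as [idtype1-max-add-shp0-g0] covers.
out = ops.gspmm(g, 'add', 'max', u_feat, e_feat)
print(out.shape)  # (num_nodes, 8)
)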
tests/compute/test_sparse.py::test_sddmm[int64-sub-u-u-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-u-shp4-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-u-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-v-shp0-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-v-shp0-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-v-shp1-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-v-shp1-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-v-shp2-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-v-shp2-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-v-shp3-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-v-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-v-shp4-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-v-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-e-shp0-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-e-shp0-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-e-shp1-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-e-shp1-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-e-shp2-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-e-shp2-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-e-shp3-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-e-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-e-shp4-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-u-e-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-u-shp0-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-u-shp0-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp0-g0] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp0-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp1-g0] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp1-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp2-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-u-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-u-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-u-shp2-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-u-shp2-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-u-shp3-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-u-shp3-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-u-shp4-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-u-shp4-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-v-shp0-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-v-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-v-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-v-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-v-shp2-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-v-shp2-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-v-shp3-g0] PASSED [ 71%] 
tests/compute/test_sparse.py::test_sddmm[int64-sub-v-v-shp3-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-v-shp4-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-v-shp4-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-e-shp0-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-e-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-e-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-e-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-e-shp2-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-e-shp2-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-e-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-e-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-e-shp4-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-e-shp4-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-u-shp0-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-u-shp0-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-u-shp1-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-u-shp1-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-u-shp2-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-u-shp2-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-u-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-u-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-u-shp4-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-u-shp4-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-v-shp0-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-v-shp0-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-v-shp1-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-v-shp1-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-v-shp2-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-v-shp2-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-v-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-v-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-v-shp4-g0]
FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp2-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp3-g0] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp3-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp4-g0] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp4-g1]
Client [1090] waits on 172.17.0.3:57707
Machine (0) group (0) client (0) connect to server successfully!
Client[0] in group[0] is exiting...
Server (0) shutdown. Server is exiting...
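(The connect/exit lines above trace the dgl.distributed client lifecycle: a client blocks until it reaches the server's RPC endpoint, registers, and the servers shut down once every client has exited. A hypothetical client-side skeleton that produces this kind of trace; the ip_config path and graph name are placeholders, not values from this run:

import dgl

# Placeholder config; the real test generates its own ip_config and partitions.
dgl.distributed.initialize('ip_config.txt')        # client waits, then connects
g = dgl.distributed.DistGraph('some_graph_name')   # attach to the served partitions
print(g.num_nodes())
# When the trainer process ends, the client detaches ("Client[0] ... is exiting")
# and the servers follow ("Server (0) shutdown. Server is exiting...").
)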
PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-v-shp4-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-e-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-e-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-e-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-e-shp1-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-e-shp2-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-e-shp2-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-e-shp3-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-e-shp3-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-e-shp4-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-sub-e-e-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-u-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-u-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-u-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-u-shp1-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-u-shp2-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-u-shp2-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-u-shp3-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-u-shp3-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-u-shp4-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-u-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-v-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-v-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-v-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-v-shp1-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-v-shp2-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-v-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-v-shp3-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-v-shp3-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-v-shp4-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-v-shp4-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-e-shp0-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-e-shp0-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-e-shp1-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-e-shp1-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-e-shp2-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-e-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-e-shp3-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-e-shp3-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-e-shp4-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-e-shp4-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-u-shp0-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-u-shp0-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-u-shp1-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-u-shp1-g1] PASSED [ 74%] 
tests/compute/test_sparse.py::test_sddmm[int64-mul-v-u-shp2-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-u-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-u-shp3-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-u-shp3-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-u-shp4-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-u-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-v-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-v-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-v-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-v-shp1-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-v-shp2-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-v-shp2-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-v-shp3-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-v-shp3-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-v-shp4-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-v-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-e-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-e-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-e-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-e-shp1-g1] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp5-g0] FAILED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp5-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp0-g0] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp0-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-e-shp2-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-e-shp2-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-e-shp3-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-e-shp3-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-e-shp4-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-v-e-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-u-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-u-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-u-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-u-shp1-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-u-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-u-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-u-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-u-shp3-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-u-shp4-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-u-shp4-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-v-shp0-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-v-shp0-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-v-shp1-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-v-shp1-g1] PASSED [ 76%] 
tests/compute/test_sparse.py::test_sddmm[int64-mul-e-v-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-v-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-v-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-v-shp3-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-v-shp4-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-v-shp4-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-e-shp0-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-e-shp0-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-e-shp1-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-e-shp1-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-e-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-e-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-e-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-e-shp3-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-e-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-e-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-u-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-u-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-u-shp1-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-u-shp1-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-u-shp2-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-u-shp2-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-u-shp3-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-u-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-u-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-u-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-v-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-v-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-v-shp1-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-v-shp1-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-v-shp2-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-v-shp2-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-v-shp3-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-v-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-v-shp4-g0]
FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp1-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp2-g0] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp2-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp3-g0] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp3-g1]
Done sampling
Server (1) shutdown. Server is exiting...
Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB
Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB
Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB
[05:32:41] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 630 nodes and 23980 edges into 2 parts and get 4875 edge cuts
Metis partitioning: 0.006 seconds, peak memory: 1.529 GB
Assigning nodes to METIS partitions takes 0.007s, peak mem: 1.529 GB
Reshuffle nodes and edges: 0.001 seconds
Split the graph: 0.001 seconds
Construct subgraphs: 0.003 seconds
Splitting the graph into partitions takes 0.005s, peak mem: 1.529 GB
part 0 has 200 nodes of type n1 and 108 are inside the partition
part 0 has 190 nodes of type n2 and 101 are inside the partition
part 0 has 212 nodes of type n3 and 113 are inside the partition
part 0 has 2824 edges of type r12 and 2054 are inside the partition
part 0 has 3108 edges of type r13 and 2219 are inside the partition
part 0 has 2907 edges of type r23 and 2078 are inside the partition
part 1 has 210 nodes of type n1 and 102 are inside the partition
part 1 has 200 nodes of type n2 and 99 are inside the partition
part 1 has 218 nodes of type n3 and 107 are inside the partition
part 1 has 2530 edges of type r12 and 1746 are inside the partition
part 1 has 2809 edges of type r13 and 1981 are inside the partition
part 1 has 2687 edges of type r23 and 1912 are inside the partition
Save partitions: 0.003 seconds, peak memory: 1.529 GB
There are 11990 edges in the graph and 0 edge cuts for 2 partitions.
PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-v-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-e-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-e-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-e-shp1-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-e-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-e-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-e-shp2-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-e-shp3-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-e-shp3-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-e-shp4-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-u-e-shp4-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-u-shp0-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-u-shp0-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-u-shp1-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-u-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-u-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-u-shp2-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-u-shp3-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-u-shp3-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-u-shp4-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-u-shp4-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-v-shp0-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-v-shp0-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-v-shp1-g0] PASSED [ 78%]
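(The trace above is the standard dgl.distributed.partition_graph pipeline: convert to a homogeneous graph, build multi-constraint METIS weights, partition, reshuffle node/edge ids, split into per-part subgraphs, and save. A minimal sketch that drives the same pipeline, assuming a toy heterograph with the n1/n2/n3 node types and r12/r13/r23 edge types reported in the log; sizes and paths are made up:

import torch
import dgl

def rand_pairs(n_src, n_dst, n_edges):
    # Random endpoints for a toy relation (illustrative only).
    return (torch.randint(0, n_src, (n_edges,)), torch.randint(0, n_dst, (n_edges,)))

g = dgl.heterograph({
    ('n1', 'r12', 'n2'): rand_pairs(100, 100, 500),
    ('n1', 'r13', 'n3'): rand_pairs(100, 100, 500),
    ('n2', 'r23', 'n3'): rand_pairs(100, 100, 500),
})

# Two-way METIS partition; this emits the "Converting to homogeneous graph",
# "Metis partitioning", per-part membership and "Save partitions" messages above.
dgl.distributed.partition_graph(g, graph_name='toy', num_parts=2,
                                out_path='/tmp/parts', part_method='metis')
)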
tests/compute/test_sparse.py::test_sddmm[int64-div-v-v-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-v-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-v-shp2-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-v-shp3-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-v-shp3-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-v-shp4-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-v-shp4-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-e-shp0-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-e-shp0-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-e-shp1-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-e-shp1-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-e-shp2-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-e-shp2-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-e-shp3-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-e-shp3-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-e-shp4-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-e-shp4-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-u-shp0-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-u-shp0-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-u-shp1-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-u-shp1-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp4-g0] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp4-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp5-g0] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp5-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp0-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-u-shp2-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-u-shp2-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-u-shp3-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-u-shp3-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-u-shp4-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-u-shp4-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-v-shp0-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-v-shp0-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-v-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-v-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-v-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-v-shp2-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-v-shp3-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-v-shp3-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-v-shp4-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-v-shp4-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-e-shp0-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-e-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-e-shp1-g0] PASSED [ 80%] 
tests/compute/test_sparse.py::test_sddmm[int64-div-e-e-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-e-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-e-shp2-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-e-shp3-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-e-shp3-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-e-shp4-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-div-e-e-shp4-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-u-shp0-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-u-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-u-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-u-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-u-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-u-shp2-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-u-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-u-shp3-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-u-shp4-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-u-shp4-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-v-shp0-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-v-shp0-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-v-shp1-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-v-shp1-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-v-shp2-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-v-shp2-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-v-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-v-shp3-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-v-shp4-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-v-shp4-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-e-shp0-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-e-shp0-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-e-shp1-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-e-shp1-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-e-shp2-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-e-shp2-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-e-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-e-shp3-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-e-shp4-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-e-shp4-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp0-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp1-g0] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp1-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp2-g0] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp2-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-u-shp0-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-u-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-u-shp1-g0] PASSED [ 82%] 
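(Every case in this file appears to run once per integer id type — the int32/int64 prefix in the sddmm ids, idtype0/idtype1 in the spmm ids. The id type is ordinary DGLGraph state; a quick sketch of how it is set and converted:

import torch
import dgl

g = dgl.graph(([0, 1], [1, 2]), idtype=torch.int64)  # ids stored as int64
g32 = g.int()     # cast the graph's ids to int32
g64 = g32.long()  # and back to int64
print(g32.idtype, g64.idtype)  # torch.int32 torch.int64
)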
tests/compute/test_sparse.py::test_sddmm[int64-dot-v-u-shp1-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-u-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-u-shp2-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-u-shp3-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-u-shp3-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-u-shp4-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-u-shp4-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-v-shp0-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-v-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-v-shp1-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-v-shp1-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-v-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-v-shp2-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-v-shp3-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-v-shp3-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-v-shp4-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-v-shp4-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-e-shp0-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-e-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-e-shp1-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-e-shp1-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-e-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-e-shp2-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-e-shp3-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-e-shp3-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-e-shp4-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-v-e-shp4-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-u-shp0-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-u-shp0-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-u-shp1-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-u-shp1-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-u-shp2-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-u-shp2-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-u-shp3-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-u-shp3-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-u-shp4-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-u-shp4-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-v-shp0-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-v-shp0-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-v-shp1-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-v-shp1-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-v-shp2-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-v-shp2-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-v-shp3-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-v-shp3-g1] FAILED [ 39%] 
tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp3-g0] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp3-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp4-g0] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp4-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-v-shp4-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-v-shp4-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-e-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-e-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-e-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-e-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-e-shp2-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-e-shp2-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-e-shp3-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-e-shp3-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-e-shp4-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-e-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-u-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-u-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-u-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-u-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-u-shp2-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-u-shp2-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-u-shp3-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-u-shp3-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-u-shp4-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-u-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-v-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-v-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-v-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-v-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-v-shp2-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-v-shp2-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-v-shp3-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-v-shp3-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-v-shp4-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-v-shp4-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-e-shp0-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-e-shp0-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-e-shp1-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-e-shp1-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-e-shp2-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-e-shp2-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-e-shp3-g0] PASSED [ 85%] 
tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-e-shp3-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-e-shp4-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-u-e-shp4-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-u-shp0-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-u-shp0-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-u-shp1-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-u-shp1-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-u-shp2-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-u-shp2-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-u-shp3-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-u-shp3-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-u-shp4-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-u-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-v-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-v-shp0-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-v-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-v-shp1-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-v-shp2-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-v-shp2-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-v-shp3-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-v-shp3-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-v-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-v-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp0-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp1-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp2-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp2-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp3-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp3-g1]
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
load test_sampling
start graph service on server 0 for part 0
[05:32:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:32:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:18024]...
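(The DGLWarning above concerns edge-type naming: a bare etype string is deprecated in favor of the canonical (src_type, etype, dst_type) triple, which stays unambiguous when one etype name links several node-type pairs. An illustrative sketch using the n1/r12/n2 schema from this test, not the test's own code:

import torch
import dgl

g = dgl.heterograph({
    ('n1', 'r12', 'n2'): (torch.tensor([0, 1]), torch.tensor([1, 0])),
})

n_dep = g.num_edges('r12')                  # deprecated: bare etype string
n_canon = g.num_edges(('n1', 'r12', 'n2'))  # preferred: canonical triple
assert n_dep == n_canon == 2
)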
FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp5-g0] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp5-g1] FAILED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp0-g0] FAILED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp0-g1] FAILED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp0-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp1-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp4-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp4-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp0-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp0-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp1-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp1-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp4-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp4-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp0-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp0-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp1-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp1-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp1-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp2-g0] PASSED [ 88%] 
tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp2-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp3-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp3-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp1-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp2-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp2-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp3-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp3-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp3-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp4-g0] FAILED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp1-g1] FAILED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp2-g0] FAILED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp2-g1] FAILED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp3-g0] FAILED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp3-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp4-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp0-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp0-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp3-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp4-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp4-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp0-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp0-g1] PASSED [ 89%] 
tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp0-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp0-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp1-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp1-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp2-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp2-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp3-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp0-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp0-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp1-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp1-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp2-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp2-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp3-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp0-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp0-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp1-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp1-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp2-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp2-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp3-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp3-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp4-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp4-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp0-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp0-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp1-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp1-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp2-g0] 
PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp2-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp3-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp3-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp4-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp4-g1] PASSED [ 91%]FAILED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp4-g0] FAILED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp4-g1] FAILED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp5-g0] FAILED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp5-g1] FAILED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp0-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp1-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp1-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp2-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp2-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp3-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp3-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp4-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp4-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp0-g0] tests/compute/test_sparse.py::test_edge_softmax[int32-shp0-src-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int32-shp0-dst-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int32-shp1-src-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int32-shp1-dst-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int32-shp2-src-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int32-shp2-dst-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp0-src-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp0-dst-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp1-src-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp1-dst-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp2-src-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp2-dst-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[sum] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[max] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[min] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[mean] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-int64] SKIPPED [ 92%] 
tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-int64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-256] SKIPPED [ 94%] 
tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_use_libxsmm_switch SKIPPED (Only ...) [ 94%] tests/compute/test_specialization.py::test_v2v_update_all[int32] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_update_all[int64] FAILED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp0-g1] FAILED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp1-g0] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp1-g1] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp2-g0] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[int32] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[int64] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp2-g1] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp3-g0] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp3-g1] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp4-g0] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp4-g1] PASSED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[int32] PASSED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[int64] PASSED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[int32] PASSED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[int64] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp0-g0] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp0-g1] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp1-g0] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp1-g1] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp2-g0] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:32:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18026]... [05:32:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
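[Editor's note: the bracketed suffixes in the test names above, e.g. test_sddmm[int64-copy_lhs-u-e-shp3-g1], are pytest parametrize IDs. The sketch below is a hypothetical reconstruction of how such IDs arise, not DGL's actual test code; the real tests stack more parameters.]

    import pytest

    # Stacked parametrize decorators take the cross product of all values;
    # the decorator nearest the function contributes the first ID component.
    @pytest.mark.parametrize('g', ['g0', 'g1'])
    @pytest.mark.parametrize('shp', ['shp0', 'shp1', 'shp2', 'shp3', 'shp4'])
    @pytest.mark.parametrize('idtype', ['int32', 'int64'])
    def test_sddmm(idtype, shp, g):
        # Each combination runs (and is reported) as its own test,
        # e.g. test_sddmm[int64-shp3-g1] PASSED/FAILED/SKIPPED.
        assert isinstance(idtype, str)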
PASSED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[int32] PASSED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[int64] PASSED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[int32] PASSED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[int64] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp2-g1] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp3-g0] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp3-g1] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp4-g0] PASSED [ 95%] tests/compute/test_subgraph.py::test_edge_subgraph PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[int32] SKIPPED (M...) [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[int64] SKIPPED (M...) [ 95%] tests/compute/test_subgraph.py::test_subgraph1[int32] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph1[int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_in_subgraph[int32] PASSED [ 95%]FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp4-g1] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp0-g0] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp0-g1] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp1-g0] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp1-g1] tests/compute/test_subgraph.py::test_in_subgraph[int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[int32] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_message_passing PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[int32] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_out_subgraph[int32] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_out_subgraph[int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device0] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device1] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device2] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device3] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp2-g0] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp2-g1] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp3-g0] FAILED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp3-g1] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp4-g0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device0] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device1] PASSED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device2] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device3] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[int32-device0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[int32-device1] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[int64-device0] SKIPPED [ 96%] 
tests/compute/test_subgraph.py::test_uva_subgraph[int64-device1] SKIPPED [ 96%] tests/compute/test_transform.py::test_line_graph1 PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[int32] PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[int64] PASSED [ 96%] tests/compute/test_transform.py::test_no_backtracking PASSED [ 96%] tests/compute/test_transform.py::test_reverse[int32] PASSED [ 96%] tests/compute/test_transform.py::test_reverse[int64] PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[int32] PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[int64] PASSED [ 96%] tests/compute/test_transform.py::test_to_bidirected SKIPPED (GPU not...) [ 96%] tests/compute/test_transform.py::test_add_reverse_edges PASSED [ 96%] tests/compute/test_transform.py::test_simple_graph SKIPPED (GPU not ...) [ 96%] tests/compute/test_transform.py::test_khop_graph SKIPPED (GPU not im...) [ 96%] tests/compute/test_transform.py::test_khop_adj SKIPPED (GPU not impl...) [ 96%] tests/compute/test_transform.py::test_laplacian_lambda_max SKIPPED (...) [ 97%] tests/compute/test_transform.py::test_partition_with_halo SKIPPED (G...) [ 97%] tests/compute/test_transform.py::test_metis_partition[int32] SKIPPED [ 97%] tests/compute/test_transform.py::test_metis_partition[int64] SKIPPED [ 97%] tests/compute/test_transform.py::test_reorder_nodes SKIPPED (It does...) [ 97%] tests/compute/test_transform.py::test_compact[int32] PASSED [ 97%] tests/compute/test_transform.py::test_compact[int64] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp4-g1] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp2-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp2-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp3-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp0-g0] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp0-g1] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp1-g0] PASSED [ 97%] tests/compute/test_transform.py::test_to_simple[int32] SKIPPED (GPU ...) [ 97%] tests/compute/test_transform.py::test_to_simple[int64] SKIPPED (GPU ...) 
[ 97%] tests/compute/test_transform.py::test_to_block[int32] PASSED [ 97%] tests/compute/test_transform.py::test_to_block[int64] PASSED [ 97%] tests/compute/test_transform.py::test_remove_edges[int32] PASSED [ 97%] tests/compute/test_transform.py::test_remove_edges[int64] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp1-g1] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp2-g0] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp2-g1] PASSED [ 97%] tests/compute/test_transform.py::test_add_edges[int32] PASSED [ 97%] tests/compute/test_transform.py::test_add_edges[int64] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[int32] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[int64] PASSED [ 97%] tests/compute/test_transform.py::test_remove_nodes[int32] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp3-g0] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp3-g1] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp4-g0] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp4-g1] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp0-g0] FAILED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp0-g1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_nodes[int64] PASSED [ 97%] tests/compute/test_transform.py::test_add_selfloop[int32] PASSED [ 97%] tests/compute/test_transform.py::test_add_selfloop[int64] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp1-g0] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp1-g1] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp2-g0] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp2-g1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[int32] PASSED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[int64] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_graph[int32] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_graph[int64] PASSED [ 98%] tests/compute/test_transform.py::test_norm_by_dst[int32] PASSED [ 98%] tests/compute/test_transform.py::test_norm_by_dst[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_self_loop[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_self_loop[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[int32] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp3-g0] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp3-g1] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp4-g0] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp4-g1] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp0-g0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_to_simple[int32] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_to_simple[int64] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_line_graph[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_line_graph[int64] PASSED [ 98%] 
tests/compute/test_transform.py::test_module_khop_graph[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_khop_graph[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_gcnnorm[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_gcnnorm[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_ppr[int32] SKIPPED (Onl...) [ 98%] tests/compute/test_transform.py::test_module_ppr[int64] SKIPPED (Onl...) [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[int32] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[int64] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_gdc[int32] SKIPPED (Onl...) [ 99%] tests/compute/test_transform.py::test_module_gdc[int64] SKIPPED (Onl...) [ 99%] tests/compute/test_transform.py::test_module_node_shuffle[int32] PASSED [ 99%] tests/compute/test_transform.py::test_module_node_shuffle[int64] PASSED [ 99%] tests/compute/test_transform.py::test_module_drop_node[int32] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_node[int64] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_edge[int32] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_edge[int64] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_add_edge[int32] PASSED [ 99%] tests/compute/test_transform.py::test_module_add_edge[int64] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[int32] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[int64] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[int32] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[int64] PASSED [ 99%] tests/compute/test_transform.py::test_module_sign[g0] SKIPPED (Only ...) 
[ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[int32] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[int64] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[int32] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[int64] SKIPPED [ 99%] tests/compute/test_traversal.py::test_bfs[int32] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp0-g1] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp1-g0] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp1-g1] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp2-g0] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp2-g1] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp3-g0] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp3-g1] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp4-g0] FAILED [ 43%]PASSED [ 99%] tests/compute/test_traversal.py::test_bfs[int64] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp4-g1] FAILED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp1-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp2-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp0-g0] FAILED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp0-g1] FAILED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp1-g0] PASSED [ 99%] tests/compute/test_traversal.py::test_topological_nodes[int32] PASSED [ 99%] tests/compute/test_traversal.py::test_topological_nodes[int64] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[int32] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[int64] PASSED [100%] =============================== warnings summary =============================== python/dgl/backend/backend.py:1741 /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/backend.py:1741: DeprecationWarning: invalid escape 
sequence \P """ ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/utils.py:37 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/utils.py:37: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations bool = onp.bool ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:143 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:143: DeprecationWarning: In accordance with NEP 32, the function mirr was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial mirr = onp.mirr ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:160 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:160: DeprecationWarning: In accordance with NEP 32, the function npv was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial npv = onp.npv ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:164 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:164: DeprecationWarning: In accordance with NEP 32, the function pmt was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial pmt = onp.pmt ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:173 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:173: DeprecationWarning: In accordance with NEP 32, the function ppmt was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial ppmt = onp.ppmt ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:176 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:176: DeprecationWarning: In accordance with NEP 32, the function pv was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial pv = onp.pv ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:177 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:177: DeprecationWarning: In accordance with NEP 32, the function rate was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial rate = onp.rate ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:48 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:48: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. 
cur_np_ver = LooseVersion(_np.__version__) ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:49 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:49: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. np_1_17_ver = LooseVersion('1.17') ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:68 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:68: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. cur_np_ver = LooseVersion(_np.__version__) ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:69 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:69: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. np_1_15_ver = LooseVersion('1.15') python/dgl/backend/mxnet/tensor.py:15 python/dgl/backend/mxnet/tensor.py:15 /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/mxnet/tensor.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(mx.__version__) < LooseVersion("1.6.0"): python/dgl/backend/mxnet/tensor.py:31 /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/mxnet/tensor.py:31: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations 'bool' : np.bool} # mxnet does not support bool python/dgl/backend/mxnet/tensor.py:37: 202 warnings tests/compute/test_basics.py: 229 warnings tests/compute/test_batched_graph.py: 432 warnings tests/compute/test_batched_heterograph.py: 310 warnings tests/compute/test_csrmm.py: 332 warnings tests/compute/test_dataloader.py: 50 warnings tests/compute/test_filter.py: 22 warnings tests/compute/test_frame.py: 8 warnings tests/compute/test_graph.py: 1626 warnings tests/compute/test_heterograph.py: 4432 warnings tests/compute/test_index.py: 1 warning tests/compute/test_kernel.py: 2511 warnings tests/compute/test_merge.py: 12 warnings tests/compute/test_nccl.py: 2 warnings tests/compute/test_new_update_all_hetero.py: 56 warnings tests/compute/test_partition.py: 22 warnings tests/compute/test_pickle.py: 16 warnings tests/compute/test_readout.py: 844 warnings tests/compute/test_removal.py: 838 warnings tests/compute/test_sampler.py: 167 warnings tests/compute/test_sampling.py: 2737 warnings tests/compute/test_serialize.py: 2 warnings tests/compute/test_shared_mem.py: 40 warnings tests/compute/test_sparse.py: 2564 warnings tests/compute/test_specialization.py: 598 warnings tests/compute/test_subgraph.py: 666 warnings tests/compute/test_transform.py: 1818 warnings tests/compute/test_traversal.py: 792 warnings /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/mxnet/tensor.py:37: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. 
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations if dtype == np.bool: python/dgl/backend/mxnet/tensor.py:136: 174 warnings tests/compute/test_basics.py: 225 warnings tests/compute/test_batched_graph.py: 396 warnings tests/compute/test_batched_heterograph.py: 204 warnings tests/compute/test_dataloader.py: 16 warnings tests/compute/test_filter.py: 8 warnings tests/compute/test_frame.py: 1 warning tests/compute/test_graph.py: 2377 warnings tests/compute/test_heterograph.py: 5224 warnings tests/compute/test_index.py: 1 warning tests/compute/test_kernel.py: 2050 warnings tests/compute/test_merge.py: 16 warnings tests/compute/test_new_update_all_hetero.py: 56 warnings tests/compute/test_pickle.py: 24 warnings tests/compute/test_readout.py: 1416 warnings tests/compute/test_removal.py: 662 warnings tests/compute/test_sampling.py: 5106 warnings tests/compute/test_shared_mem.py: 40 warnings tests/compute/test_sparse.py: 2 warnings tests/compute/test_specialization.py: 588 warnings tests/compute/test_subgraph.py: 354 warnings tests/compute/test_transform.py: 902 warnings tests/compute/test_traversal.py: 1563 warnings /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/mxnet/tensor.py:136: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations if ty == np.bool: ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _nlv = LooseVersion(_np_version) ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p16 = _nlv < LooseVersion("1.16") ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p17 = _nlv < LooseVersion("1.17") ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p18 = _nlv < LooseVersion("1.18") ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. 
_np_version_under1p19 = _nlv < LooseVersion("1.19") ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p20 = _nlv < LooseVersion("1.20") ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. other = LooseVersion(other) ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125 ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(_np_version) >= LooseVersion("1.17.0"): tests/compute/test_basics.py: 2 warnings tests/compute/test_filter.py: 1 warning tests/compute/test_graph.py: 9 warnings tests/compute/test_kernel.py: 3 warnings tests/compute/test_removal.py: 16 warnings tests/compute/test_specialization.py: 12 warnings tests/compute/test_subgraph.py: 2 warnings tests/compute/test_transform.py: 6 warnings tests/compute/test_traversal.py: 2 warnings /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`. dgl_warning('Recommend creating graphs by `dgl.graph(data)`' tests/compute/test_basics.py: 2 warnings tests/compute/test_batched_graph.py: 10 warnings tests/compute/test_graph.py: 2 warnings tests/compute/test_kernel.py: 1 warning tests/compute/test_removal.py: 10 warnings tests/compute/test_specialization.py: 10 warnings tests/compute/test_subgraph.py: 2 warnings /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:354: DGLWarning: DGLGraph.add_edge is deprecated. Please use DGLGraph.add_edges dgl_warning("DGLGraph.add_edge is deprecated. Please use DGLGraph.add_edges") tests/compute/test_basics.py::test_update_all_0deg[int32] tests/compute/test_basics.py::test_update_all_0deg[int64] tests/compute/test_basics.py::test_pull_0deg[int32] tests/compute/test_basics.py::test_pull_0deg[int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/core.py:79: DGLWarning: The input graph for the user-defined edge function does not contain valid edges dgl_warning('The input graph for the user-defined edge function ' \ tests/compute/test_batched_graph.py::test_batched_edge_ordering[int32] tests/compute/test_batched_graph.py::test_batched_edge_ordering[int64] tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query tests/compute/test_transform.py::test_no_backtracking tests/compute/test_transform.py::test_reverse[int32] tests/compute/test_transform.py::test_reverse[int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2978: DGLWarning: DGLGraph.edge_id is deprecated. Please use DGLGraph.edge_ids. dgl_warning("DGLGraph.edge_id is deprecated. 
Please use DGLGraph.edge_ids.") tests/compute/test_batched_heterograph.py::test_features[int32] tests/compute/test_batched_heterograph.py::test_features[int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/batch.py:159: DGLWarning: Arguments edge_attrs has been deprecated. Please use edata instead. dgl_warning('Arguments edge_attrs has been deprecated. Please use' tests/compute/test_csrmm.py::test_csrmm[float32-int32] tests/compute/test_csrmm.py::test_csrmm[float32-int64] tests/compute/test_csrmm.py::test_csrmm[float64-int32] tests/compute/test_csrmm.py::test_csrmm[float64-int64] tests/compute/test_csrmm.py::test_csrsum[float32-int32] tests/compute/test_csrmm.py::test_csrsum[float32-int64] tests/compute/test_csrmm.py::test_csrsum[float64-int32] tests/compute/test_csrmm.py::test_csrsum[float64-int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph_index.py:797: FutureWarning: Adjacency matrix by default currently returns edge IDs. As a result there is one 0 entry which is not eliminated. In the next release it will return 1s by default, and 0 will be eliminated otherwise. FutureWarning) tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query tests/compute/test_heterograph.py::test_query[int32] tests/compute/test_heterograph.py::test_query[int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2753: DGLWarning: DGLGraph.has_node is deprecated. Please use DGLGraph.has_nodes dgl_warning("DGLGraph.has_node is deprecated. Please use DGLGraph.has_nodes") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2687: DGLWarning: DGLGraph.__contains__ is deprecated. Please directly call has_nodes. dgl_warning('DGLGraph.__contains__ is deprecated.' tests/compute/test_graph.py::test_query tests/compute/test_sampling.py::test_non_uniform_random_walk[False] tests/compute/test_sampling.py::test_uniform_random_walk[True] tests/compute/test_sampling.py::test_uniform_random_walk[False] tests/compute/test_transform.py::test_no_backtracking /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2851: DGLWarning: DGLGraph.has_edge_between is deprecated. Please use DGLGraph.has_edges_between dgl_warning("DGLGraph.has_edge_between is deprecated. " tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:3432: DGLWarning: DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees dgl_warning("DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:3516: DGLWarning: DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees dgl_warning("DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees") tests/compute/test_graph.py::test_query /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly', 'sort_csr'] are deprecated in v0.5, and can be safely removed in all cases. 
' removed in all cases.'.format(list(deprecate_kwargs.keys()))) tests/compute/test_heterograph.py: 352 warnings tests/compute/test_kernel.py: 1766 warnings tests/compute/test_new_update_all_hetero.py: 56 warnings tests/compute/test_readout.py: 144 warnings tests/compute/test_removal.py: 8 warnings tests/compute/test_sampler.py: 22 warnings tests/compute/test_sampling.py: 12 warnings tests/compute/test_sparse.py: 2560 warnings tests/compute/test_subgraph.py: 10 warnings tests/compute/test_transform.py: 52 warnings /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/mxnet/tensor.py:50: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations dtype = np.int32 if data.dtype == np.bool else data.dtype tests/compute/test_heterograph.py: 20 warnings /root/jenkins/workspace/dgl_PR-4648/tests/compute/test_heterograph.py:1128: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead assert np.asscalar(F.asnumpy(src_i)) == nid[src[i]] tests/compute/test_heterograph.py: 20 warnings /root/jenkins/workspace/dgl_PR-4648/tests/compute/test_heterograph.py:1129: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead assert np.asscalar(F.asnumpy(dst_i)) == nid[dst[i]] tests/compute/test_heterograph.py::test_invertible_conversion[int32] tests/compute/test_heterograph.py::test_invertible_conversion[int64] tests/compute/test_shared_mem.py::test_single_process[int32] tests/compute/test_shared_mem.py::test_single_process[int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:2635: DGLWarning: DGLGraph.is_readonly is deprecated in v0.5. DGLGraph now always supports mutable operations like add_nodes and add_edges. dgl_warning('DGLGraph.is_readonly is deprecated in v0.5.\n' tests/compute/test_kernel.py: 3 warnings tests/compute/test_sparse.py: 290 warnings /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/mxnet/sparse.py:17: DGLWarning: MXNet do not support scatter_add, fallback to numpy. dgl_warning("MXNet do not support scatter_add, fallback to numpy.") tests/compute/test_partition.py::test_get_node_partition_from_book[int32] tests/compute/test_partition.py::test_get_node_partition_from_book[int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") tests/compute/test_sampler.py::test_create_full tests/compute/test_sampler.py::test_1neighbor_sampler_all tests/compute/test_sampler.py::test_1neighbor_sampler tests/compute/test_sampler.py::test_prefetch_neighbor_sampler tests/compute/test_sampler.py::test_10neighbor_sampler_all tests/compute/test_sampler.py::test_10neighbor_sampler tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler tests/compute/test_sampler.py::test_setseed /root/jenkins/workspace/dgl_PR-4648/python/dgl/contrib/sampling/sampler.py:317: DGLWarning: dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5. Please read our guide for how to use the new sampling APIs. dgl_warning('dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5.' 
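[Editor's note: most of the 48k-warning volume in this summary comes from a handful of NumPy/distutils deprecations. A minimal illustrative sketch of the modern replacements follows; it is editor-added guidance, not code from this repository.]

    import numpy as np
    from packaging.version import Version

    # (1) `np.bool` alias: deprecated in NumPy 1.20 (later removed). Use the
    #     builtin `bool`, or `np.bool_` if the NumPy scalar type is wanted.
    mask = np.zeros(4, dtype=np.bool_)  # instead of dtype=np.bool

    # (2) distutils LooseVersion is deprecated; compare with packaging.version.
    if Version(np.__version__) >= Version('1.20'):
        pass  # previously: LooseVersion(np.__version__) >= LooseVersion('1.20')

    # (3) `np.asscalar(a)` is deprecated since NumPy 1.16; use `a.item()`.
    value = np.array([3.0]).item()

    # (4) Per NEP 32, the financial functions (mirr, npv, pmt, ppmt, pv, rate)
    #     moved out of NumPy into the separate numpy_financial package.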
tests/compute/test_sampler.py::test_create_full tests/compute/test_sampler.py::test_1neighbor_sampler_all tests/compute/test_sampler.py::test_1neighbor_sampler tests/compute/test_sampler.py::test_prefetch_neighbor_sampler tests/compute/test_sampler.py::test_10neighbor_sampler_all tests/compute/test_sampler.py::test_10neighbor_sampler tests/compute/test_sampler.py::test_layer_sampler tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler tests/compute/test_sampler.py::test_setseed /root/jenkins/workspace/dgl_PR-4648/python/dgl/_deprecate/nodeflow.py:99: DGLWarning: NodeFlow APIs are deprecated starting from v0.5. Please read our guide for how to use the new sampling APIs. dgl_warning('NodeFlow APIs are deprecated starting from v0.5. Please read our' tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/sampling/negative.py:102: ComplexWarning: Casting complex values to real discards the imaginary part g._graph, etype_id, num_samples, 3, exclude_self_loops, replace, redundancy) tests/compute/test_serialize.py::test_load_old_files1 tests/compute/test_serialize.py::test_load_old_files2 /root/jenkins/workspace/dgl_PR-4648/python/dgl/data/graph_serialize.py:179: DGLWarning: You are loading a graph file saved by old version of dgl. Please consider saving it again with the current format. Please consider saving it again with the current format.") tests/compute/test_transform.py::test_reverse_shared_frames[int32] tests/compute/test_transform.py::test_reverse_shared_frames[int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/transforms/functional.py:1267: DGLWarning: share_ndata argument has been renamed to copy_ndata. dgl_warning('share_ndata argument has been renamed to copy_ndata.') tests/compute/test_transform.py::test_reverse_shared_frames[int32] tests/compute/test_transform.py::test_reverse_shared_frames[int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/transforms/functional.py:1270: DGLWarning: share_edata argument has been renamed to copy_edata. 
dgl_warning('share_edata argument has been renamed to copy_edata.') -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html -- generated xml file: /root/jenkins/workspace/dgl_PR-4648/pytest_compute.xml -- ============================ slowest 100 durations ============================= 19.59s call tests/compute/test_sampler.py::test_negative_sampler 8.74s call tests/compute/test_kernel.py::test_all_binary_builtins 1.20s call tests/compute/test_graph.py::test_query 1.02s call tests/compute/test_sampling.py::test_non_uniform_random_walk[False] 0.75s call tests/compute/test_heterograph.py::test_query[int64] 0.69s call tests/compute/test_heterograph.py::test_query[int32] 0.65s call tests/compute/test_sampling.py::test_uniform_random_walk[False] 0.50s call tests/compute/test_sampling.py::test_uniform_random_walk[True] 0.48s call tests/compute/test_sampling.py::test_sample_neighbors_outedge 0.34s call tests/compute/test_traversal.py::test_bfs[int32] 0.34s call tests/compute/test_basics.py::test_compatible 0.32s call tests/compute/test_basics.py::test_batch_setter_getter[int32] 0.31s call tests/compute/test_traversal.py::test_bfs[int64] 0.29s call tests/compute/test_heterograph.py::test_view1[int32] 0.27s call tests/compute/test_heterograph.py::test_view1[int64] 0.26s call tests/compute/test_sampling.py::test_sample_neighbors_prob 0.26s call tests/compute/test_sampling.py::test_sample_neighbors_noprob 0.23s call tests/compute/test_kernel.py::test_copy_edge_reduce 0.22s call tests/compute/test_kernel.py::test_copy_src_reduce 0.17s call tests/compute/test_heterograph.py::test_updates[int32] 0.17s call tests/compute/test_heterograph.py::test_updates[int64] 0.17s call tests/compute/test_removal.py::test_node_and_edge_removal[int32] 0.16s call tests/compute/test_removal.py::test_node_and_edge_removal[int64] 0.15s call tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp0-g0] 0.15s call tests/compute/test_sampler.py::test_prefetch_neighbor_sampler 0.13s call tests/compute/test_sampler.py::test_1neighbor_sampler 0.12s call tests/compute/test_sampler.py::test_10neighbor_sampler 0.12s call tests/compute/test_transform.py::test_remove_nodes[int32] 0.12s call tests/compute/test_transform.py::test_remove_nodes[int64] 0.12s call tests/compute/test_heterograph.py::test_format[int64] 0.12s call tests/compute/test_heterograph.py::test_format[int32] 0.11s call tests/compute/test_heterograph.py::test_level2[int32] 0.11s call tests/compute/test_heterograph.py::test_level2[int64] 0.11s call tests/compute/test_transform.py::test_remove_edges[int32] 0.10s call tests/compute/test_specialization.py::test_v2v_snr[int32] 0.10s call tests/compute/test_sampler.py::test_1neighbor_sampler_all 0.10s call tests/compute/test_specialization.py::test_v2v_snr[int64] 0.10s call tests/compute/test_transform.py::test_remove_edges[int64] 0.10s call tests/compute/test_shared_mem.py::test_multi_process[int32] 0.09s call tests/compute/test_specialization.py::test_v2v_pull[int32] 0.09s call tests/compute/test_shared_mem.py::test_copy_from_gpu 0.09s call tests/compute/test_specialization.py::test_v2v_pull[int64] 0.09s call tests/compute/test_shared_mem.py::test_multi_process[int64] 0.08s call tests/compute/test_specialization.py::test_pull_multi_fallback[int32] 0.08s call tests/compute/test_specialization.py::test_pull_multi_fallback[int64] 0.07s call tests/compute/test_specialization.py::test_v2v_update_all[int32] 0.07s call tests/compute/test_heterograph.py::test_convert[int64] 0.07s call 
0.07s call tests/compute/test_specialization.py::test_v2v_update_all[int64]
0.07s call tests/compute/test_heterograph.py::test_flatten[int32]
0.07s call tests/compute/test_heterograph.py::test_convert[int32]
0.07s call tests/compute/test_heterograph.py::test_flatten[int64]
0.06s call tests/compute/test_subgraph.py::test_subgraph1[int32]
0.06s call tests/compute/test_subgraph.py::test_subgraph1[int64]
0.06s call tests/compute/test_heterograph.py::test_float_cast
0.06s call tests/compute/test_transform.py::test_add_edges[int32]
0.06s call tests/compute/test_transform.py::test_add_edges[int64]
0.05s call tests/compute/test_sampler.py::test_layer_sampler
0.05s call tests/compute/test_heterograph.py::test_add_edges[int32]
0.05s call tests/compute/test_heterograph.py::test_add_edges[int64]
0.05s call tests/compute/test_specialization.py::test_spmv_3d_feat[int64]
0.05s call tests/compute/test_heterograph.py::test_remove_nodes[int32]
0.05s call tests/compute/test_removal.py::test_edge_removal[int32]
0.05s call tests/compute/test_nccl.py::test_nccl_sparse_push_single_remainder
0.05s call tests/compute/test_heterograph.py::test_remove_nodes[int64]
0.05s call tests/compute/test_transform.py::test_add_selfloop[int32]
0.05s call tests/compute/test_transform.py::test_add_selfloop[int64]
0.05s call tests/compute/test_transform.py::test_to_block[int32]
0.05s call tests/compute/test_transform.py::test_to_block[int64]
0.04s call tests/compute/test_removal.py::test_edge_removal[int64]
0.04s call tests/compute/test_specialization.py::test_spmv_3d_feat[int32]
0.04s call tests/compute/test_heterograph.py::test_types_in_function[int32]
0.04s call tests/compute/test_heterograph.py::test_types_in_function[int64]
0.04s call tests/compute/test_nccl.py::test_nccl_sparse_pull_single_remainder
0.04s call tests/compute/test_subgraph.py::test_khop_out_subgraph[int64]
0.04s call tests/compute/test_subgraph.py::test_khop_out_subgraph[int32]
0.04s call tests/compute/test_transform.py::test_add_reverse_edges
0.04s call tests/compute/test_heterograph.py::test_create[int32]
0.04s call tests/compute/test_specialization.py::test_update_all_multi_fallback[int32]
0.04s call tests/compute/test_specialization.py::test_update_all_multi_fallback[int64]
0.04s call tests/compute/test_batched_heterograph.py::test_slice_batch[int32]
0.04s call tests/compute/test_batched_heterograph.py::test_slice_batch[int64]
0.04s call tests/compute/test_heterograph.py::test_create[int64]
0.04s call tests/compute/test_nccl.py::test_nccl_sparse_pull_single_range
0.04s call tests/compute/test_transform.py::test_no_backtracking
0.04s call tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp0-g0]
0.03s call tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-int32]
0.03s call tests/compute/test_batched_graph.py::test_batch_propagate[int32]
0.03s call tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-int64]
0.03s call tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-int64]
0.03s call tests/compute/test_batched_graph.py::test_batch_propagate[int64]
0.03s call tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-int32]
0.03s call tests/compute/test_transform.py::test_reorder_graph[int32]
0.03s call tests/compute/test_readout.py::test_reduce_readout[max-g1-int32]
0.03s call tests/compute/test_readout.py::test_reduce_readout[max-g1-int64]
0.03s call tests/compute/test_readout.py::test_reduce_readout[mean-g1-int64]
0.03s call tests/compute/test_subgraph.py::test_khop_in_subgraph[int32]
0.03s call tests/compute/test_readout.py::test_reduce_readout[mean-g1-int32]
0.03s call tests/compute/test_transform.py::test_reorder_graph[int64]
0.03s call tests/compute/test_nccl.py::test_nccl_sparse_push_single_range
0.03s call tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler
========= 2229 passed, 224 skipped, 48231 warnings in 72.02s (0:01:12) =========
FAILED [ 44%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp1-g1] FAILED [ 44%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp2-g0] FAILED [ 44%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp2-g1] FAILED [ 44%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp3-g0]
Client [1090] waits on 172.17.0.3:49037
Machine (0) group (0) client (0) connect to server successfully!
Client[0] in group[0] is exiting...
Server (0) shutdown. Server is exiting...
Server (1) shutdown. Server is exiting...
Done sampling
Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB
Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.529 GB
Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB
[05:32:46] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 1500 nodes and 2000 edges into 2 parts and get 24 edge cuts
Metis partitioning: 0.001 seconds, peak memory: 1.529 GB
Assigning nodes to METIS partitions takes 0.002s, peak mem: 1.529 GB
Reshuffle nodes and edges: 0.001 seconds
Split the graph: 0.001 seconds
Construct subgraphs: 0.002 seconds
Splitting the graph into partitions takes 0.003s, peak mem: 1.529 GB
part 0 has 519 nodes of type game and 511 are inside the partition
part 0 has 266 nodes of type user and 252 are inside the partition
part 0 has 497 edges of type buys and 489 are inside the partition
part 1 has 505 nodes of type game and 489 are inside the partition
part 1 has 256 nodes of type user and 248 are inside the partition
part 1 has 527 edges of type buys and 511 are inside the partition
Save partitions: 0.002 seconds, peak memory: 1.529 GB
There are 1000 edges in the graph and 0 edge cuts for 2 partitions.
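The partitioning messages above come from DGL's distributed graph partitioning. A minimal sketch of the call that produces this kind of output, with a hypothetical user/game 'buys' heterograph standing in for the test graph:

    import dgl
    import torch

    # Hypothetical heterograph mirroring the node/edge types in the log;
    # the sizes here are arbitrary.
    src = torch.randint(0, 500, (1000,))   # 'user' node ids
    dst = torch.randint(0, 1000, (1000,))  # 'game' node ids
    g = dgl.heterograph({('user', 'buys', 'game'): (src, dst)})

    # Emits messages like the ones above: METIS partitioning, reshuffling,
    # per-part node/edge counts, and saved partition files.
    dgl.distributed.partition_graph(
        g, graph_name='test_sampling', num_parts=2,
        out_path='tmp/partitioned', part_method='metis')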
FAILED [ 44%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp3-g1] FAILED [ 44%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp4-g0] FAILED [ 44%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp4-g1] FAILED [ 44%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp0-g0]
Error in sys.excepthook:
Original exception was:
[the two lines above repeat 16 times in the original log]
FAILED [ 44%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp0-g1] FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp1-g0] FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp1-g1] FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp2-g0] FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp2-g1] FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp3-g0] FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp3-g1] FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp4-g0] FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp4-g1]
============================= test session starts ==============================
platform linux -- Python 3.7.0, pytest-7.1.2, pluggy-1.0.0 -- /opt/conda/envs/mxnet-ci/bin/python3
cachedir: .pytest_cache
rootdir: /root/jenkins/workspace/dgl_PR-4648
collecting ...
FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp0-g0] FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp0-g1] FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp1-g0] FAILED [ 45%]
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp1-g1]
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
load test_sampling
start graph service on server 0 for part 0
[05:32:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:32:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:18024]...
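The DGLWarning from graph_partition_book.py is about how a relation is named. A minimal sketch contrasting the deprecated bare-string form with the '(str, str, str)' canonical form the warning asks for; the toy graph is hypothetical:

    import dgl
    import torch

    g = dgl.heterograph({
        ('user', 'buys', 'game'): (torch.tensor([0, 1]), torch.tensor([1, 0])),
    })

    # Deprecated: identify the relation by the bare edge-type string.
    n_old = g.num_edges('buys')
    # Preferred canonical form from the warning text:
    n_new = g.num_edges(('user', 'buys', 'game'))
    assert n_old == n_new == 2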
FAILED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp2-g0] FAILED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp2-g1] FAILED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp3-g0] FAILED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp3-g1] FAILED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp4-g0] FAILED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp4-g1] FAILED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp0-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp1-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp3-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp0-g0] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp0-g1] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp1-g0] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp1-g1] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp2-g0] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp2-g1] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp3-g0] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp3-g1] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp4-g0] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp4-g1] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp0-g0] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp0-g1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:32:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18026]... [05:32:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp1-g0] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp1-g1] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp2-g0] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp2-g1] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp3-g0] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp3-g1] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp4-g0] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp4-g1] FAILED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp0-g0] FAILED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp0-g1] FAILED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp1-g0] FAILED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp1-g1] FAILED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp2-g0] FAILED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp2-g1] FAILED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp3-g0] FAILED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp3-g1] FAILED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp4-g0] FAILED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp4-g1] FAILED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp2-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp2-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp3-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp3-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp4-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-e-shp4-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp2-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp2-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp3-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp3-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-u-shp4-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp0-g0] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp0-g1] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp1-g0] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp1-g1] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp2-g0] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp2-g1] FAILED [ 48%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp3-g0] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp3-g1] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp4-g0] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp4-g1] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp0-g0] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp0-g1] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp1-g0] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp1-g1] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp2-g0] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp2-g1] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp3-g0] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp3-g1] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp4-g0] FAILED [ 48%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp4-g1] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp0-g0] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp0-g1] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp1-g0] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp1-g1] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp2-g0] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp2-g1] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp3-g0] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp3-g1] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp4-g0] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp4-g1] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp0-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp0-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp1-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp2-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp2-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp3-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp3-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp4-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-v-shp4-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp0-g0] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp0-g1] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp1-g0] FAILED [ 49%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp1-g1] FAILED [ 50%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp2-g0] Client [1090] waits on 172.17.0.3:36771 Machine (0) group (0) client (0) connect to server successfuly! Client[0] in group[0] is exiting... 
FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp2-g1] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp3-g0] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp3-g1] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp4-g0] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp4-g1] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp0-g0] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp0-g1] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp1-g0] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp1-g1] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp2-g0] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp2-g1] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp3-g0] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp3-g1] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp4-g0] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp4-g1] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp0-g0] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp0-g1]
Server (1) shutdown. Server is exiting...
Done sampling
Server (0) shutdown. Server is exiting...
Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB
Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.529 GB
Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB
[05:32:53] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 1500 nodes and 2000 edges into 2 parts and get 23 edge cuts
Metis partitioning: 0.001 seconds, peak memory: 1.529 GB
Assigning nodes to METIS partitions takes 0.002s, peak mem: 1.529 GB
Reshuffle nodes and edges: 0.001 seconds
Split the graph: 0.001 seconds
Construct subgraphs: 0.002 seconds
Splitting the graph into partitions takes 0.003s, peak mem: 1.529 GB
part 0 has 506 nodes of type game and 496 are inside the partition
part 0 has 256 nodes of type user and 244 are inside the partition
part 0 has 533 edges of type buys and 522 are inside the partition
part 1 has 516 nodes of type game and 504 are inside the partition
part 1 has 267 nodes of type user and 256 are inside the partition
part 1 has 490 edges of type buys and 478 are inside the partition
Save partitions: 0.002 seconds, peak memory: 1.529 GB
There are 1000 edges in the graph and 0 edge cuts for 2 partitions.
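The test_sddmm failures running through this log share one pattern: parametrizations with mixed operand targets (u-v, u-e, v-u, e-u, ...) fail, while the same-target ones (u-u, v-v, e-e) pass. A minimal sketch of what one failing parametrization computes, using dgl.ops.gsddmm on a hypothetical toy graph:

    import dgl
    import torch
    from dgl import ops

    # Hypothetical toy graph: a directed 3-cycle.
    g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
    u_feat = torch.randn(g.num_nodes(), 8)  # lhs, gathered from source nodes
    v_feat = torch.randn(g.num_nodes(), 8)  # rhs, gathered from destination nodes

    # The 'sub-u-v' case: for each edge (u, v), out[e] = u_feat[u] - v_feat[v].
    out = ops.gsddmm(g, 'sub', u_feat, v_feat, lhs_target='u', rhs_target='v')
    print(out.shape)  # torch.Size([3, 8])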
FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp1-g0] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp1-g1] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp2-g0] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp2-g1] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp3-g0] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp3-g1] FAILED [ 50%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp4-g0] FAILED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp4-g1] FAILED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp0-g0] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp0-g1] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp1-g0] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp1-g1] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp2-g0] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp2-g1] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp3-g0] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp3-g1] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp4-g0] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-e-shp4-g1] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp0-g0] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp0-g1] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp1-g0] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp1-g1] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp2-g0] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp2-g1] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp3-g0] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp3-g1] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp4-g0] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-u-shp4-g1] PASSED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp0-g0] FAILED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp0-g1] FAILED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp1-g0] FAILED [ 51%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp1-g1] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp2-g0] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp2-g1] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp3-g0] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp3-g1] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp4-g0] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp4-g1] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp0-g0]
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
load test_sampling
start graph service on server 0 for part 0
[05:32:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:32:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:27870]...
FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp0-g1] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp1-g0] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp1-g1] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp2-g0]
collected 806 items
tests/mxnet/test_geometry.py::test_fps PASSED [ 0%]
tests/mxnet/test_nn.py::test_graph_conv[1-int32] PASSED [ 0%]
tests/mxnet/test_nn.py::test_graph_conv[1-int64] PASSED [ 0%]
tests/mxnet/test_nn.py::test_graph_conv[2-int32] PASSED [ 0%]
tests/mxnet/test_nn.py::test_graph_conv[2-int64]
FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp2-g1] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp3-g0] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp3-g1] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp4-g0] FAILED [ 52%]
tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp4-g1]
PASSED [ 0%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g0-int32] PASSED [ 0%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g0-int64] PASSED [ 0%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g1-int32] PASSED [ 0%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g1-int64] PASSED [ 1%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g2-int32] PASSED [ 1%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g2-int64] PASSED [ 1%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g3-int32] PASSED [ 1%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g3-int64] PASSED [ 1%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g4-int32] PASSED [ 1%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g4-int64] PASSED [ 1%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g5-int32] PASSED [ 1%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g5-int64] PASSED [ 2%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g6-int32] PASSED [ 2%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g6-int64] PASSED [ 2%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g0-int32] PASSED [ 2%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g0-int64] PASSED [ 2%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g1-int32] PASSED [ 2%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g1-int64] PASSED [ 2%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g2-int32] PASSED [ 2%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g2-int64] PASSED [ 3%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g3-int32] PASSED [ 3%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g3-int64] PASSED [ 3%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g4-int32] PASSED [ 3%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g4-int64] PASSED [ 3%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g5-int32] PASSED [ 3%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g5-int64] PASSED [ 3%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g6-int32] PASSED [ 3%]
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g6-int64] PASSED [ 4%]
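The test_graph_conv2 parametrizations beginning here sweep the norm argument of dgl.nn GraphConv over none/both/right/left. A minimal sketch of one configuration; the toy graph is hypothetical, and the PyTorch module is shown for brevity even though this session runs the MXNet backend:

    import dgl
    import torch
    from dgl.nn.pytorch import GraphConv

    # Hypothetical toy graph: a directed 3-cycle (every node has in-degree 1).
    g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
    feat = torch.randn(g.num_nodes(), 5)

    # norm is the swept parameter: 'none', 'both', 'right', or 'left'.
    conv = GraphConv(5, 2, norm='both')
    out = conv(g, feat)
    print(out.shape)  # torch.Size([3, 2])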
tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g0-int32] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g0-int64] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g1-int32] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g1-int64] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g2-int32] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g2-int64] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g3-int32] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g3-int64] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g4-int32] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g4-int64] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g5-int32] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g5-int64] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g6-int32] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g6-int64] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g0-int32] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g0-int64] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g1-int32] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g1-int64] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g2-int32] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g2-int64] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g3-int32] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g3-int64] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g4-int32] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g4-int64] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g5-int32] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g5-int64] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g6-int32] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g6-int64] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g0-int32] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g0-int64] PASSED [ 7%]FAILED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp0-g0] FAILED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp0-g1] FAILED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp1-g0] FAILED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp1-g1] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g1-int32] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g1-int64] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g2-int32] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g2-int64] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g3-int32] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g3-int64] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g4-int32] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g4-int64] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g5-int32] PASSED [ 8%] 
tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g5-int64] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g6-int32] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g6-int64] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g0-int32] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g0-int64] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g1-int32] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g1-int64] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g2-int32] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g2-int64] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g3-int32] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g3-int64] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g4-int32] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g4-int64] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g5-int32] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g5-int64] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g6-int32] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g6-int64] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g0-int32] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g0-int64] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g1-int32] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g1-int64] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g2-int32] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g2-int64] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g3-int32] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g3-int64] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g4-int32] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g4-int64] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g5-int32] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g5-int64] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g6-int32] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g6-int64] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g0-int32] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g0-int64] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g1-int32] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g1-int64] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g2-int32] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g2-int64] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g3-int32] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g3-int64] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g4-int32] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g4-int64] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g5-int32] PASSED [ 14%] 
tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g5-int64] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g6-int32] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g6-int64] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g0-int32] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g0-int64] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g1-int32] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g1-int64] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g2-int32] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g2-int64] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g3-int32] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g3-int64] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g4-int32] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g4-int64] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g5-int32] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g5-int64] FAILED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp2-g0] FAILED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp2-g1] FAILED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp3-g0] FAILED [ 52%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp3-g1] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp4-g0] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g6-int32] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g6-int64] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g0-int32] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g0-int64] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g1-int32] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g1-int64] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g2-int32] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g2-int64] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g3-int32] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g3-int64] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g4-int32] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g4-int64] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g5-int32] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g5-int64] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g6-int32] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g6-int64] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g0-int32] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g0-int64] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g1-int32] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g1-int64] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g2-int32] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g2-int64] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g3-int32] 
PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g3-int64] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g4-int32] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g4-int64] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g5-int32] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g5-int64] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g6-int32] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g6-int64] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g0-int32] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g0-int64] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g1-int32] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g1-int64] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g2-int32] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g2-int64] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g3-int32] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g3-int64] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g4-int32] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g4-int64] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g5-int32] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g5-int64] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g6-int32] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g6-int64] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g0-int32] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g0-int64] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g1-int32] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g1-int64] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g2-int32] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g2-int64] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g3-int32] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g3-int64] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g4-int32] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g4-int64] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g5-int32] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g5-int64] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g6-int32] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g6-int64] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g0-int32] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g0-int64] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp4-g1] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp0-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp1-g1] PASSED [ 53%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp2-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp2-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp3-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp3-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp4-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-v-shp4-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp0-g0] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp0-g1] FAILED [ 53%]PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g1-int32] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g1-int64] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g2-int32] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g2-int64] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g3-int32] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g3-int64] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g4-int32] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g4-int64] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g5-int32] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g5-int64] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g6-int32] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g6-int64] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g0-int32] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g0-int64] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g1-int32] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g1-int64] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g2-int32] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g2-int64] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g3-int32] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g3-int64] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g4-int32] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g4-int64] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g5-int32] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g5-int64] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g6-int32] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g6-int64] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g0-int32] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g0-int64] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g1-int32] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g1-int64] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g2-int32] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g2-int64] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g3-int32] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g3-int64] PASSED [ 27%] 
tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g4-int32] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g4-int64] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g5-int32] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g5-int64] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g6-int32] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g6-int64] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-none-g0-int32] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-none-g0-int64] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-none-g1-int32] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-none-g1-int64] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-both-g0-int32] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-both-g0-int64] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-both-g1-int32] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-both-g1-int64] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-right-g0-int32] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-right-g0-int64] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-right-g1-int32] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-right-g1-int64] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-none-g0-int32] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-none-g0-int64] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-none-g1-int32] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-none-g1-int64] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-both-g0-int32] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-both-g0-int64] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-both-g1-int32] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-both-g1-int64] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-right-g0-int32] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-right-g0-int64] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:32:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:27872]... [05:32:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:32:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-right-g1-int32] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-right-g1-int64] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-none-g0-int32] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-none-g0-int64] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-none-g1-int32] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-none-g1-int64] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-both-g0-int32] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-both-g0-int64] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-both-g1-int32] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-both-g1-int64] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-right-g0-int32] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-right-g0-int64] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-right-g1-int32] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-right-g1-int64] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-none-g0-int32] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-none-g0-int64] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-none-g1-int32] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-none-g1-int64] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-both-g0-int32] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-both-g0-int64] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-both-g1-int32] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-both-g1-int64] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-right-g0-int32] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-right-g0-int64] PASSED [ 34%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-right-g1-int32] PASSED [ 34%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-right-g1-int64] PASSED [ 34%] tests/mxnet/test_nn.py::test_tagconv[1] PASSED [ 34%] tests/mxnet/test_nn.py::test_tagconv[2] PASSED [ 34%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g0-int32] PASSED [ 34%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g0-int64] PASSED [ 34%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g1-int32] PASSED [ 34%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g1-int64] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g2-int32] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g2-int64] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g3-int32] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g3-int64] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g4-int32] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g4-int64] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp1-g0] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp1-g1] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp2-g0] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp2-g1] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp3-g0] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g5-int32] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g5-int64] PASSED [ 
36%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g6-int32] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g6-int64] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g0-int32] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g0-int64] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g1-int32] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g1-int64] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g2-int32] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g2-int64] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g3-int32] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g3-int64] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g4-int32] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g4-int64] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g5-int32] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g5-int64] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g6-int32] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g6-int64] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp3-g1] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp4-g0] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp4-g1] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp0-g0] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp0-g1] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g0-int32] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g0-int64] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g1-int32] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g1-int64] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g2-int32] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g2-int64] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g3-int32] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g3-int64] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g4-int32] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g4-int64] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g5-int32] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g5-int64] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g6-int32] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g6-int64] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g0-int32] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g0-int64] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g1-int32] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g1-int64] FAILED [ 53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp1-g0] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp1-g1] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp2-g0] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp2-g1] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp3-g0] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g2-int32] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g2-int64] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g3-int32] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g3-int64] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g4-int32] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g4-int64] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g5-int32] PASSED [ 41%] 
tests/mxnet/test_nn.py::test_gat_conv[5-20-g5-int64] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g6-int32] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g6-int64] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g0-int32] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g0-int64] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g1-int32] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g1-int64] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g0-int32] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g0-int64] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g1-int32] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp3-g1] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp4-g0] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp4-g1] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp0-g0] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp0-g1] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g1-int64] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g0-int32] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g0-int64] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g1-int32] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g1-int64] PASSED [ 43%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g0-int32] PASSED [ 43%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g0-int64] PASSED [ 43%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g1-int32] PASSED [ 43%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g1-int64] PASSED [ 43%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g0-int32] PASSED [ 43%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g0-int64] PASSED [ 43%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g1-int32] PASSED [ 43%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g1-int64] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g2-int32] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g2-int64] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g3-int32] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g3-int64] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g4-int32] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g4-int64] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g5-int32] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g5-int64] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g6-int32] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g6-int64] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g7-int32] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g7-int64] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g0-int32] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g0-int64] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g1-int32] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g1-int64] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g2-int32] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g2-int64] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g3-int32] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g3-int64] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g4-int32] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g4-int64] PASSED [ 46%] 
tests/mxnet/test_nn.py::test_sage_conv[1-pool-g5-int32] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g5-int64] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g6-int32] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g6-int64] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g7-int32] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g7-int64] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp1-g0] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp1-g1] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp2-g0] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp2-g1] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp3-g0] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g0-int32] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g0-int64] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g1-int32] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g1-int64] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g2-int32] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g2-int64] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g3-int32] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g3-int64] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g4-int32] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g4-int64] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g5-int32] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g5-int64] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g6-int32] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g6-int64] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g7-int32] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g7-int64] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g0-int32] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g0-int64] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g1-int32] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g1-int64] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g2-int32] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g2-int64] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g3-int32] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g3-int64] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g4-int32] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g4-int64] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g5-int32] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g5-int64] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g6-int32] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g6-int64] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g7-int32] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g7-int64] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g0-int32] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g0-int64] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g1-int32] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g1-int64] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g2-int32] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g2-int64] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g3-int32] PASSED [ 
52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g3-int64] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g4-int32] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g4-int64] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g5-int32] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g5-int64] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g6-int32] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g6-int64] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g7-int32] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g7-int64] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g0-int32] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g0-int64] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g1-int32] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g1-int64] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp3-g1] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp4-g0] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp4-g1] FAILED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp0-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp0-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp1-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp1-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp3-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp3-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-e-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp0-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp1-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp1-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp2-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp2-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp3-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp3-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-u-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp0-g0] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g2-int32] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g2-int64] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g3-int32] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g3-int64] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g4-int32] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g4-int64] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g5-int32] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g5-int64] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g6-int32] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g6-int64] PASSED [ 55%] 
tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g7-int32] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g7-int64] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g0-int32] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g0-int64] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g1-int32] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g1-int64] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g2-int32] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g2-int64] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g0-int32] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g0-int64] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g1-int32] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g1-int64] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g2-int32] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g2-int64] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g0-int32] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g0-int64] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g1-int32] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g1-int64] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g2-int32] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g2-int64] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g0-int32] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g0-int64] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g1-int32] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g1-int64] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g2-int32] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g2-int64] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g0-int32] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g0-int64] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g1-int32] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g1-int64] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g2-int32] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g2-int64] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g0-int32] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g0-int64] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g1-int32] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g1-int64] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g2-int32] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g2-int64] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-mean-int32] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-mean-int64] FAILED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp0-g1] FAILED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp1-g0] FAILED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp1-g1] FAILED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp2-g0] FAILED [ 55%]PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-pool-int32] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-pool-int64] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-gcn-int32] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-gcn-int64] PASSED [ 60%] 
tests/mxnet/test_nn.py::test_sage_conv_bi2[2-mean-int32] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[2-mean-int64] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[2-pool-int32] PASSED [ 61%] tests/mxnet/test_nn.py::test_sage_conv_bi2[2-pool-int64] PASSED [ 61%] tests/mxnet/test_nn.py::test_sage_conv_bi2[2-gcn-int32] PASSED [ 61%] tests/mxnet/test_nn.py::test_sage_conv_bi2[2-gcn-int64] PASSED [ 61%] tests/mxnet/test_nn.py::test_gg_conv PASSED [ 61%] tests/mxnet/test_nn.py::test_cheb_conv[1] PASSED [ 61%] tests/mxnet/test_nn.py::test_cheb_conv[20] PASSED [ 61%] tests/mxnet/test_nn.py::test_agnn_conv[g0-int32] PASSED [ 61%] tests/mxnet/test_nn.py::test_agnn_conv[g0-int64] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g1-int32] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g1-int64] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g2-int32] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g2-int64] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g3-int32] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g3-int64] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g4-int32] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g4-int64] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv[g5-int32] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv[g5-int64] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv[g6-int32] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv[g6-int64] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv_bi[g0-int32] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv_bi[g0-int64] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp2-g1] FAILED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp3-g0] FAILED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp3-g1] FAILED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp4-g0] FAILED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp4-g1] FAILED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp0-g0] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv_bi[g1-int32] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv_bi[g1-int64] PASSED [ 64%] tests/mxnet/test_nn.py::test_appnp_conv PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_cheb_conv[1] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_cheb_conv[2] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-both-int32] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-both-int64] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-right-int32] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-right-int64] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-none-int32] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-none-int64] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-both-int32] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-both-int64] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-right-int32] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-right-int64] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-none-int32] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-none-int64] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-both-int32] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-both-int64] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-right-int32] PASSED [ 66%] 
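For reference, the test_sage_conv ids above parametrize the aggregator type ('mean', 'pool', or 'gcn'), a graph fixture, and the index dtype. A hedged sketch of exercising the layer under test with the MXNet backend; the graph and feature sizes are illustrative:

# Hedged sketch of a single test_sage_conv case; sizes are illustrative.
import os
os.environ['DGLBACKEND'] = 'mxnet'  # must be set before dgl is imported
import mxnet as mx
import dgl
from dgl.nn.mxnet import SAGEConv

g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
feat = mx.nd.random.uniform(shape=(g.num_nodes(), 10))
conv = SAGEConv(10, 5, aggregator_type='mean')  # 'mean' | 'pool' | 'gcn', as in the ids
conv.initialize()
out = conv(g, feat)
print(out.shape)  # (4, 5)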
tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-right-int64] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-none-int32] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-none-int64] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-both-int32] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-both-int64] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-right-int32] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-right-int64] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-none-int32] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-none-int64] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-both-int32] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-both-int64] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-right-int32] FAILED [ 55%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp0-g1] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp1-g0] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp1-g1] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp2-g0] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-right-int64] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-none-int32] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-none-int64] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-both-int32] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-both-int64] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-right-int32] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-right-int64] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-none-int32] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-none-int64] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-both-int32] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-both-int64] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-right-int32] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-right-int64] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-none-int32] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-none-int64] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-both-int32] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-both-int64] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-right-int32] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-right-int64] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-none-int32] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-none-int64] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-both-int32] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-both-int64] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-right-int32] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-right-int64] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-none-int32] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-none-int64] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-both-int32] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-both-int64] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-right-int32] 
PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-right-int64] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-none-int32] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-none-int64] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-both-int32] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-both-int64] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-right-int32] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-right-int64] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-none-int32] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp2-g1] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp3-g0] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp3-g1] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp4-g0] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp4-g1] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-none-int64] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-both-int32] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-both-int64] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-right-int32] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-right-int64] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-none-int32] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-none-int64] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-both-int32] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-both-int64] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-right-int32] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-right-int64] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-none-int32] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-none-int64] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-both-int32] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-both-int64] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-right-int32] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-right-int64] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-none-int32] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-none-int64] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g0-int32] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g0-int64] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g1-int32] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g1-int64] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g2-int32] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g2-int64] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g3-int32] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g3-int64] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g4-int32] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g4-int64] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g5-int32] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g5-int64] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g6-int32] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g6-int64] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g7-int32] 
PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g7-int64] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g8-int32] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g8-int64] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g9-int32] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g9-int64] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp0-g0] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp0-g1] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp1-g0] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp1-g1] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g0-int32] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g0-int64] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g1-int32] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g1-int64] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g2-int32] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g2-int64] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g3-int32] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g3-int64] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g4-int32] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g4-int64] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g5-int32] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g5-int64] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g6-int32] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g6-int64] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g7-int32] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g7-int64] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g8-int32] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g8-int64] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g9-int32] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g9-int64] PASSED [ 79%] tests/mxnet/test_nn.py::test_edge_conv[1-g0-int32] PASSED [ 79%] tests/mxnet/test_nn.py::test_edge_conv[1-g0-int64] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g1-int32] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g1-int64] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g2-int32] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g2-int64] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g3-int32] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g3-int64] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g4-int32] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g4-int64] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[1-g5-int32] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[1-g5-int64] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[1-g6-int32] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[1-g6-int64] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[2-g0-int32] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[2-g0-int64] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[2-g1-int32] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[2-g1-int64] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g2-int32] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g2-int64] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g3-int32] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g3-int64] FAILED [ 56%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp2-g0] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp2-g1] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp3-g0] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp3-g1] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g4-int32] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g4-int64] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g5-int32] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g5-int64] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv[2-g6-int32] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv[2-g6-int64] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[1-g0-int32] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[1-g0-int64] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[1-g1-int32] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[1-g1-int64] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[2-g0-int32] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[2-g0-int64] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[2-g1-int32] PASSED [ 84%] tests/mxnet/test_nn.py::test_edge_conv_bi[2-g1-int64] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g0-int32] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g0-int64] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g1-int32] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g1-int64] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g2-int32] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g2-int64] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g3-int32] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g3-int64] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g4-int32] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g4-int64] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g5-int32] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g5-int64] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g6-int32] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g6-int64] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g7-int32] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[mean-g7-int64] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g0-int32] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g0-int64] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g1-int32] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g1-int64] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g2-int32] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g2-int64] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g3-int32] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g3-int64] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g4-int32] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g4-int64] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g5-int32] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g5-int64] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g6-int32] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g6-int64] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g7-int32] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[max-g7-int64] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g0-int32] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g0-int64] PASSED [ 88%] 
tests/mxnet/test_nn.py::test_gin_conv[sum-g1-int32] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g1-int64] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g2-int32] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g2-int64] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g3-int32] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g3-int64] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g4-int32] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g4-int64] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g5-int32] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g5-int64] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g6-int32] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g6-int64] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g7-int32] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv[sum-g7-int64] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g0-int32] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g0-int64] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g1-int32] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g1-int64] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g2-int32] PASSED [ 90%]FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp4-g0] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp4-g1] FAILED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp0-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp0-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp1-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp3-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp3-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-v-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp0-g0] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp0-g1] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp1-g0] Client [1090] waits on 172.17.0.3:60789 Machine (0) group (0) client (0) connect to server successfuly! Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... 
tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g2-int64] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g0-int32] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g0-int64] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g1-int32] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g1-int64] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g2-int32] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g2-int64] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g0-int32] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g0-int64] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g1-int32] PASSED [ 92%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g1-int64] PASSED [ 92%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g2-int32] PASSED [ 92%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g2-int64] PASSED [ 92%] tests/mxnet/test_nn.py::test_gmm_conv[g0-int32] PASSED [ 92%] tests/mxnet/test_nn.py::test_gmm_conv[g0-int64] PASSED [ 92%] tests/mxnet/test_nn.py::test_gmm_conv[g1-int32] PASSED [ 92%] tests/mxnet/test_nn.py::test_gmm_conv[g1-int64] PASSED [ 92%] tests/mxnet/test_nn.py::test_gmm_conv[g2-int32] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g2-int64] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g3-int32] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g3-int64] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g4-int32] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g4-int64] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g5-int32] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g5-int64] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g6-int32] PASSED [ 94%] tests/mxnet/test_nn.py::test_gmm_conv[g6-int64] PASSED [ 94%] tests/mxnet/test_nn.py::test_gmm_conv_bi[g0-int32] PASSED [ 94%] tests/mxnet/test_nn.py::test_gmm_conv_bi[g0-int64] PASSED [ 94%] tests/mxnet/test_nn.py::test_gmm_conv_bi[g1-int32] PASSED [ 94%] tests/mxnet/test_nn.py::test_gmm_conv_bi[g1-int64] PASSED [ 94%] tests/mxnet/test_nn.py::test_nn_conv[g0-int32] PASSED [ 94%] tests/mxnet/test_nn.py::test_nn_conv[g0-int64] PASSED [ 94%] tests/mxnet/test_nn.py::test_nn_conv[g1-int32] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g1-int64] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g2-int32] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g2-int64] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g3-int32] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g3-int64] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g4-int32] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g4-int64] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g5-int32] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv[g5-int64] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv[g6-int32] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv[g6-int64] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv[g7-int32] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv[g7-int64] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv_bi[g0-int32] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv_bi[g0-int64] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv_bi[g1-int32] PASSED [ 97%] tests/mxnet/test_nn.py::test_nn_conv_bi[g1-int64] PASSED [ 97%] tests/mxnet/test_nn.py::test_nn_conv_bi[g2-int32] PASSED [ 97%] tests/mxnet/test_nn.py::test_nn_conv_bi[g2-int64] PASSED [ 97%] tests/mxnet/test_nn.py::test_sg_conv[1] PASSED [ 97%] tests/mxnet/test_nn.py::test_sg_conv[2] PASSED [ 97%] tests/mxnet/test_nn.py::test_set2set FAILED [ 57%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp1-g1] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp2-g0] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp2-g1] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp3-g0] PASSED [ 97%] tests/mxnet/test_nn.py::test_glob_att_pool PASSED [ 97%] tests/mxnet/test_nn.py::test_simple_pool PASSED [ 98%] tests/mxnet/test_nn.py::test_rgcn[1] PASSED [ 98%] tests/mxnet/test_nn.py::test_rgcn[2] PASSED [ 98%] tests/mxnet/test_nn.py::test_rgcn[8] PASSED [ 98%] tests/mxnet/test_nn.py::test_sequential PASSED [ 98%] tests/mxnet/test_nn.py::test_hetero_conv[sum-int32] PASSED [ 98%] tests/mxnet/test_nn.py::test_hetero_conv[sum-int64] PASSED [ 98%] tests/mxnet/test_nn.py::test_hetero_conv[max-int32] PASSED [ 98%] tests/mxnet/test_nn.py::test_hetero_conv[max-int64] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp3-g1] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp4-g0] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp4-g1] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp0-g0] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp0-g1] Server (1) shutdown. Server is exiting... Done sampling PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[min-int32] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[min-int64] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[mean-int32] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[mean-int64] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[stack-int32] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp1-g0] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp1-g1] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp2-g0] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp2-g1] Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:32:59] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 1500 nodes and 2000 edges into 2 parts and get 31 edge cuts Metis partitioning: 0.001 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.002s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.001 seconds Construct subgraphs: 0.002 seconds Splitting the graph into partitions takes 0.004s, peak mem: 1.529 GB part 0 has 501 nodes of type game and 485 are inside the partition part 0 has 257 nodes of type user and 243 are inside the partition part 0 has 476 edges of type buys and 460 are inside the partition part 1 has 529 nodes of type game and 515 are inside the partition part 1 has 273 nodes of type user and 257 are inside the partition part 1 has 555 edges of type buys and 540 are inside the partition Save partitions: 0.002 seconds, peak memory: 1.529 GB There are 1000 edges in the graph and 0 edge cuts for 2 partitions. 
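The partitioning trace above follows the standard dgl.distributed.partition_graph pipeline: convert the heterograph to a homogeneous graph, make it bidirected for METIS, partition, then reshuffle, split, and save the parts. A minimal sketch of the call that produces this kind of output; the user/buys/game schema and node counts are read off the log, while the random edges and output path are illustrative:

# Hedged sketch of the partitioning call behind the trace above.
import torch
import dgl

src = torch.randint(0, 500, (1000,))   # 'user' endpoints of 1000 'buys' edges
dst = torch.randint(0, 1000, (1000,))  # 'game' endpoints
g = dgl.heterograph(
    {('user', 'buys', 'game'): (src, dst)},
    num_nodes_dict={'user': 500, 'game': 1000})  # 1500 nodes total, as in the log

dgl.distributed.partition_graph(
    g, graph_name='test_sampling', num_parts=2,
    out_path='/tmp/test_sampling_parts', part_method='metis')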
PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[stack-int64] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[myagg-int32] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[myagg-int64] PASSED [100%] =============================== warnings summary =============================== ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/utils.py:37 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/utils.py:37: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations bool = onp.bool ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:143 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:143: DeprecationWarning: In accordance with NEP 32, the function mirr was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial mirr = onp.mirr ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:160 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:160: DeprecationWarning: In accordance with NEP 32, the function npv was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial npv = onp.npv ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:164 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:164: DeprecationWarning: In accordance with NEP 32, the function pmt was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial pmt = onp.pmt ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:173 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:173: DeprecationWarning: In accordance with NEP 32, the function ppmt was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial ppmt = onp.ppmt ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:176 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:176: DeprecationWarning: In accordance with NEP 32, the function pv was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial pv = onp.pv ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:177 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy/fallback.py:177: DeprecationWarning: In accordance with NEP 32, the function rate was removed from NumPy version 1.20. A replacement for this function is available in the numpy_financial library: https://pypi.org/project/numpy-financial rate = onp.rate ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:48 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:48: DeprecationWarning: distutils Version classes are deprecated. 
Use packaging.version instead. cur_np_ver = LooseVersion(_np.__version__) ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:49 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:49: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. np_1_17_ver = LooseVersion('1.17') ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:68 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:68: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. cur_np_ver = LooseVersion(_np.__version__) ../../../../opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:69 /opt/conda/envs/mxnet-ci/lib/python3.7/site-packages/mxnet/numpy_dispatch_protocol.py:69: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. np_1_15_ver = LooseVersion('1.15') python/dgl/backend/mxnet/tensor.py:15 python/dgl/backend/mxnet/tensor.py:15 /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/mxnet/tensor.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(mx.__version__) < LooseVersion("1.6.0"): python/dgl/backend/mxnet/tensor.py:31 /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/mxnet/tensor.py:31: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations 'bool' : np.bool} # mxnet does not support bool python/dgl/backend/mxnet/tensor.py:37: 290 warnings tests/mxnet/test_nn.py: 388 warnings /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/mxnet/tensor.py:37: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations if dtype == np.bool: python/dgl/backend/mxnet/tensor.py:136: 230 warnings tests/mxnet/test_nn.py: 464 warnings /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/mxnet/tensor.py:136: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations if ty == np.bool: tests/mxnet/test_nn.py::test_cheb_conv[1] tests/mxnet/test_nn.py::test_cheb_conv[20] /root/jenkins/workspace/dgl_PR-4648/python/dgl/nn/mxnet/conv/chebconv.py:121: DGLWarning: lambda_max is not provided, using default value of 2. Please use dgl.laplacian_lambda_max to compute the eigenvalues. "lambda_max is not provided, using default value of 2. 
" -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html -- generated xml file: /root/jenkins/workspace/dgl_PR-4648/pytest_backend.xml -- ============================ slowest 100 durations ============================= 0.33s call tests/mxnet/test_nn.py::test_graph_conv[1-int32] 0.32s call tests/mxnet/test_geometry.py::test_fps 0.06s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g5-int64] 0.05s call tests/mxnet/test_nn.py::test_hetero_conv[sum-int32] 0.04s call tests/mxnet/test_nn.py::test_hetero_conv[min-int64] 0.04s call tests/mxnet/test_nn.py::test_hetero_conv[mean-int32] 0.04s call tests/mxnet/test_nn.py::test_hetero_conv[stack-int32] 0.04s call tests/mxnet/test_nn.py::test_hetero_conv[max-int32] 0.04s call tests/mxnet/test_nn.py::test_hetero_conv[min-int32] 0.04s call tests/mxnet/test_nn.py::test_hetero_conv[sum-int64] 0.04s call tests/mxnet/test_nn.py::test_hetero_conv[mean-int64] 0.04s call tests/mxnet/test_nn.py::test_hetero_conv[max-int64] 0.04s call tests/mxnet/test_nn.py::test_hetero_conv[myagg-int32] 0.04s call tests/mxnet/test_nn.py::test_hetero_conv[myagg-int64] 0.04s call tests/mxnet/test_nn.py::test_hetero_conv[stack-int64] 0.03s call tests/mxnet/test_nn.py::test_set2set 0.03s call tests/mxnet/test_nn.py::test_dense_cheb_conv[1] 0.03s call tests/mxnet/test_nn.py::test_dense_cheb_conv[2] 0.03s call tests/mxnet/test_nn.py::test_gg_conv 0.02s call tests/mxnet/test_nn.py::test_graph_conv[1-int64] 0.02s call tests/mxnet/test_nn.py::test_graph_conv[2-int64] 0.02s call tests/mxnet/test_nn.py::test_graph_conv[2-int32] 0.02s call tests/mxnet/test_nn.py::test_rgcn[8] 0.02s call tests/mxnet/test_nn.py::test_rgcn[2] 0.01s call tests/mxnet/test_nn.py::test_simple_pool 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g6-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g5-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g5-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g0-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g1-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g6-int32] 0.01s call tests/mxnet/test_nn.py::test_rgcn[1] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g6-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g1-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g6-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g5-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g6-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g2-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g4-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g6-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g1-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g4-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g5-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g0-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g4-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g0-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g3-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g0-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g3-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g2-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g6-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g3-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g1-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g4-int32] 0.01s 
call tests/mxnet/test_nn.py::test_gat_conv[5-1-g6-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g5-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g2-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g5-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g3-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g5-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g2-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g2-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g2-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g4-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g4-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g2-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g0-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g2-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g0-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g4-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g1-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g3-int64] 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[2-mean-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g0-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g0-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g3-int64] 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[1-mean-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g1-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g1-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g1-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g3-int64] 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[2-mean-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g4-int64] 0.01s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g3-int64] 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[1-gcn-int64] 0.01s call tests/mxnet/test_nn.py::test_sequential 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[1-gcn-int32] 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[1-pool-int32] 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[1-mean-int32] 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[2-gcn-int64] 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[1-pool-int64] 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[2-gcn-int32] 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[2-pool-int64] 0.01s call tests/mxnet/test_nn.py::test_sage_conv_bi2[2-pool-int32] 0.01s call tests/mxnet/test_nn.py::test_tagconv[1] 0.01s call tests/mxnet/test_nn.py::test_tagconv[2] 0.01s call tests/mxnet/test_nn.py::test_glob_att_pool 0.01s call tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g1-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g1-int32] 0.01s call tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g1-int32] ===================== 806 passed, 1388 warnings in 11.78s ====================== FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp3-g0] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp3-g1] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp4-g0] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp4-g1] FAILED [ 57%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp0-g0] Error in sys.excepthook: Original exception was: Error in sys.excepthook: Original exception was: Error in sys.excepthook: Original exception was: 
FAILED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp0-g1] FAILED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp1-g0] FAILED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp1-g1] FAILED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp2-g0] FAILED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp2-g1] FAILED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp3-g0] FAILED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp3-g1] FAILED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp4-g0] FAILED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp4-g1] [Pipeline] } /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:33:00] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:00] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:10976]...
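The DGLWarning from graph_partition_book.py above asks callers to replace bare edge-type strings with canonical (src_type, etype, dst_type) triples. A small illustration of the two spellings on an ordinary heterograph; the exact deprecated call site is internal to the distributed code, so the graph here is only a stand-in:

# Hedged illustration of the bare-string vs. canonical edge-type spellings.
import dgl

g = dgl.heterograph({('user', 'buys', 'game'): ([0, 1], [1, 2])})

print(g.num_edges('buys'))                    # bare string: the deprecated style
print(g.num_edges(('user', 'buys', 'game')))  # canonical triple: the preferred form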
[Pipeline] // timeout FAILED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp0-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp2-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp3-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp3-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp4-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-e-shp4-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp0-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-u-shp4-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp0-g0] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp0-g1] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp1-g0] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp1-g1] [Pipeline] } [Pipeline] // stage Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration... 
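Stepping back to the warnings summary above: most of the 1388 warnings are the NumPy 1.20 alias deprecations (np.bool and friends) raised from mxnet and from python/dgl/backend/mxnet/tensor.py. The fix the warning text suggests is mechanical; a minimal sketch:

# Hedged sketch of the np.bool fix suggested by the DeprecationWarnings above.
import numpy as np

# Deprecated spellings seen in the warnings: {'bool': np.bool} and `if dtype == np.bool:`.
# Replacements: the builtin for dtype arguments, np.bool_ where a NumPy scalar type is needed.
dtype_table = {'bool': bool}
x = np.zeros(4, dtype=dtype_table['bool'])

if x.dtype == np.bool_:  # compare against the NumPy scalar type, not the removed alias
    print('boolean array')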
FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp2-g0] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp2-g1] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp3-g0] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp3-g1] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp4-g0] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp4-g1] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp0-g0] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp0-g1] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp1-g0] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp1-g1] [WS-CLEANUP] done [Pipeline] } $ docker stop --time=1 93431508da8c7bd1ffa88cc07c573b36fd6622338dac6808937ce4f0b46901f0 FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp2-g0] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp2-g1] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp3-g0] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp3-g1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:33:01] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:01] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:10978]... [05:33:01] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:01] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp4-g0] FAILED [ 59%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp4-g1] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp0-g0] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp0-g1] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp1-g0] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp1-g1] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp2-g0] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp2-g1] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp3-g0] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp3-g1] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp4-g0] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp4-g1] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp0-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp2-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp2-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp3-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp3-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp4-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-v-shp4-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp0-g0] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp0-g1] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp1-g0] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp1-g1] FAILED [ 60%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp2-g0] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp2-g1] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp3-g0] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp3-g1] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp4-g0] $ docker rm -f 93431508da8c7bd1ffa88cc07c573b36fd6622338dac6808937ce4f0b46901f0 FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp4-g1] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp0-g0] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp0-g1] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp1-g0] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp1-g1] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp2-g0] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp2-g1] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp3-g0] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp3-g1] FAILED [ 61%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp4-g0] [Pipeline] // withDockerContainer FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp4-g1] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp0-g0] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp0-g1] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp1-g0] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp1-g1] [Pipeline] } [Pipeline] // withEnv [Pipeline] } [Pipeline] // node [Pipeline] } [Pipeline] // stage [Pipeline] } FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp2-g0] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp2-g1] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp3-g0] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp3-g1] FAILED [ 61%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp4-g0] FAILED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp4-g1] FAILED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp2-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp2-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp3-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp3-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-e-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp2-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp2-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp3-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp3-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-u-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp0-g0] FAILED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp0-g1] FAILED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp1-g0] FAILED [ 62%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp1-g1] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp2-g0] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp2-g1] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp3-g0] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp3-g1] FAILED [ 63%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g0] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g1] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g0] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g1] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g0] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g1] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g0] Client [1090] waits on 172.17.0.3:50325 Machine (0) group (0) client (0) connect to server successfully! Client[0] in group[0] is exiting... Server (1) shutdown. Server is exiting... Server (0) shutdown. Server is exiting... Done sampling FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g1] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g0] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g1] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g0] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g1] Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:33:04] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 1500 nodes and 2000 edges into 2 parts and get 27 edge cuts Metis partitioning: 0.001 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.002s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.001 seconds Construct subgraphs: 0.002 seconds Splitting the graph into partitions takes 0.003s, peak mem: 1.529 GB part 0 has 514 nodes of type game and 503 are inside the partition part 0 has 273 nodes of type user and 257 are inside the partition part 0 has 523 edges of type buys and 512 are inside the partition part 1 has 513 nodes of type game and 497 are inside the partition part 1 has 254 nodes of type user and 243 are inside the partition part 1 has 504 edges of type buys and 488 are inside the partition Save partitions: 0.002 seconds, peak memory: 1.529 GB There are 1000 edges in the graph and 0 edge cuts for 2 partitions.
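The METIS lines above are DGL partitioning the test heterograph (converted to a homogeneous graph of 1500 nodes / 2000 edges) into two parts before the distributed sampling test. A minimal sketch of that step, assuming dgl.distributed.partition_graph; the graph, name, and output path are stand-ins:

    import dgl
    import dgl.distributed

    g = dgl.rand_graph(1500, 2000)   # stand-in for the converted test graph
    # Split into 2 parts with METIS and write per-part node/edge data to disk;
    # this produces the "part 0 has ... nodes" summary seen in the log.
    dgl.distributed.partition_graph(
        g, graph_name='test_sampling', num_parts=2,
        out_path='/tmp/parts', part_method='metis')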
FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g0] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g1] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp1-g0] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp1-g1] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp2-g0] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp2-g1] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp3-g0] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp3-g1] FAILED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp4-g0] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp4-g1] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp2-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp3-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp0-g0] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp0-g1] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp1-g0] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:33:05] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:05] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:26559]... 
FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp1-g1] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp2-g0] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp2-g1] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp3-g0] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp3-g1] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp4-g0] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp4-g1] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp0-g0] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp0-g1] FAILED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp1-g0] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp1-g1] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp2-g0] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp2-g1] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp3-g0] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp3-g1] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp4-g0] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp4-g1] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp0-g0] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp0-g1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:33:06] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:06] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:26561]... [05:33:06] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:06] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp1-g0] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp1-g1] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp2-g0] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp2-g1] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp3-g0] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp3-g1] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp4-g0] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp4-g1] FAILED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp0-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp0-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp0-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp1-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp1-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp2-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp3-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp0-g0] FAILED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp0-g1] FAILED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp1-g0] FAILED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp1-g1] FAILED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp2-g0] FAILED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp2-g1] FAILED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp3-g0] FAILED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp3-g1] FAILED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp4-g0] FAILED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp4-g1] FAILED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp0-g0] FAILED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp0-g1] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp1-g0] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp1-g1] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp2-g0] FAILED [ 67%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp2-g1] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp3-g0] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp3-g1] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp4-g0] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp4-g1] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp0-g0] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp0-g1] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp1-g0] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp1-g1] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp2-g0] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp2-g1] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp3-g0] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp3-g1] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp4-g0] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp4-g1] FAILED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp0-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp0-g0] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp0-g1] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp1-g0] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp1-g1] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp2-g0] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp2-g1] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp3-g0] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp3-g1] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp4-g0] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp4-g1] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp0-g0] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp0-g1] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp1-g0] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp1-g1] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp2-g0] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp2-g1] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp3-g0] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp3-g1] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp4-g0] Client [1090] waits on 172.17.0.3:50565 Machine 
(0) group (0) client (0) connect to server successfully! Client[0] in group[0] is exiting... FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp4-g1] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp0-g0] FAILED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp0-g1] FAILED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp1-g0] FAILED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp1-g1] FAILED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp2-g0] FAILED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp2-g1] FAILED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp3-g0] FAILED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp3-g1] FAILED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp4-g0] FAILED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp4-g1] FAILED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp0-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp2-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp3-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp3-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp4-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp4-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp0-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp2-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp3-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp4-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp0-g0] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp0-g1] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp1-g0] Done sampling Server (1) shutdown. Server (0) shutdown. Server is exiting... Server is exiting... FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp1-g1] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp2-g0] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp2-g1] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp3-g0] PASSED [ 25%] tests/distributed/test_distributed_sampling.py::test_standalone_sampling NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files.
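The interleaved lines above trace one full server/client round trip: each partition's server opens an RPC endpoint ("Server is waiting for connections..."), the client connects and samples ("Done sampling"), and the client's exit triggers the "Server (n) shutdown." messages. A rough sketch of the two roles, assuming dgl.distributed's server/client entry points; the ip_config file, paths, and counts are hypothetical:

    import torch
    import dgl.distributed

    def run_server(part_id):
        # One server process per partition.
        server = dgl.distributed.DistGraphServer(
            server_id=part_id, ip_config='ip_config.txt',
            num_servers=1, num_clients=1,
            part_config='/tmp/parts/test_sampling.json')
        server.start()   # blocks, serving remote sampling requests

    def run_client():
        # Connects to every server listed in ip_config.txt, samples, then exits.
        dgl.distributed.initialize('ip_config.txt')
        g = dgl.distributed.DistGraph('test_sampling')
        frontier = dgl.distributed.sample_neighbors(
            g, torch.tensor([0, 10, 99]), fanout=3)
        print('Done sampling', frontier.num_edges())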
Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Calculate edge assignment: 0.000 seconds Save partitions: 0.018 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 1 partitions. Client[-1] in group[-1] is exiting... NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp3-g1] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp4-g0] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp4-g1] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp0-g0] Save partitions: 0.043 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 1 partitions. Client[-1] in group[-1] is exiting... PASSED [ 27%] tests/distributed/test_distributed_sampling.py::test_rpc_in_subgraph NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:33:11] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 2 parts and get 260 edge cuts Metis partitioning: 0.002 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.003s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.003 seconds Splitting the graph into partitions takes 0.006s, peak mem: 1.529 GB part 0 has 1512 nodes and 1354 are inside the partition part 0 has 5630 edges and 5370 are inside the partition part 1 has 1552 nodes and 1354 are inside the partition part 1 has 5446 edges and 5186 are inside the partition Save partitions: 0.015 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 2 partitions. 
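The NumNodes/NumEdges blocks above are DGL's citation-graph loader reporting the Cora statistics (2708 nodes, 10556 edges, 1433 features, 7 classes) from its local cache. A minimal sketch of the load:

    import dgl.data

    dataset = dgl.data.CoraGraphDataset()   # prints the summary above when loading
    g = dataset[0]
    print(g.num_nodes(), g.num_edges())     # 2708 10556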
FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp0-g1] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp1-g0] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp1-g1] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp2-g0] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp2-g1] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp3-g0] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp3-g1] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp4-g0] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp4-g1] FAILED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp0-g0] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp0-g1] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp1-g0] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp1-g1] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp2-g0] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp2-g1] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp3-g0] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp3-g1] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp4-g0] FAILED [ 71%] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_in_subgraph start graph service on server 0 for part 0 [05:33:12] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:12] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:17223]...
tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp4-g1] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp0-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp2-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp2-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp3-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp3-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp4-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp4-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp0-g0] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp0-g1] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp1-g0] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp1-g1] FAILED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp2-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp2-g1] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp3-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp3-g1] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp4-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp4-g1] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp0-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp0-g1] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp1-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp1-g1] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp2-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp2-g1] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp3-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp3-g1] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp4-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp4-g1] /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") [05:33:13] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:13] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. load test_in_subgraph start graph service on server 1 for part 1 [05:33:13] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:13] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:17225]... 
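test_in_subgraph above starts one server per partition and then asks for the full in-edge neighborhood of a set of seed nodes over RPC. A minimal sketch of the client side, assuming dgl.distributed exposes in_subgraph; the seed ids are illustrative:

    import torch
    import dgl.distributed

    dgl.distributed.initialize('ip_config.txt')
    g = dgl.distributed.DistGraph('test_in_subgraph')
    # All in-edges of the seeds, fetched across partitions over RPC.
    sg = dgl.distributed.in_subgraph(g, torch.tensor([7, 42, 100]))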
FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp0-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp0-g1] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp1-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp1-g1] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp2-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp2-g1] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp3-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp3-g1] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp4-g0] FAILED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp4-g1] FAILED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp1-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp2-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp2-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp3-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp3-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp4-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp1-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp2-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp2-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp3-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp3-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp4-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp0-g0] FAILED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp0-g1] FAILED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp1-g0] FAILED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp1-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp2-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp2-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp3-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp3-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp4-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp4-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp0-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp0-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp1-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp1-g1] FAILED [ 74%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp2-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp2-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp3-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp3-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp4-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp4-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp0-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp0-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp1-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp1-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp2-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp2-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp3-g0] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp3-g1] FAILED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp4-g0] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp4-g1] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp1-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp2-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp2-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp3-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp3-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp4-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp0-g0] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp0-g1] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp1-g0] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp1-g1] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp2-g0] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp2-g1] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp3-g0] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp3-g1] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp4-g0] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp4-g1] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp0-g0] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp0-g1] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp1-g0] FAILED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp1-g1] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp2-g0] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp2-g1] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp3-g0] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp3-g1] FAILED [ 76%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp4-g0] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp4-g1] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp0-g0] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp0-g1] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp1-g0] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp1-g1] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp2-g0] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp2-g1] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp3-g0] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp3-g1] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp4-g0] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp4-g1] FAILED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp0-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp0-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp1-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp1-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp1-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp1-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp2-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp2-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp3-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp0-g0] FAILED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp0-g1] FAILED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp1-g0] FAILED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp1-g1] FAILED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp2-g0] FAILED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp2-g1] FAILED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp3-g0] FAILED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp3-g1] FAILED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp4-g0] FAILED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp4-g1] FAILED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp0-g0] FAILED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp0-g1] FAILED [ 77%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp1-g0] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp1-g1] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp2-g0] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp2-g1] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp3-g0] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp3-g1] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp4-g0] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp4-g1] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp0-g0] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp0-g1] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp1-g0] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp1-g1] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp2-g0] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp2-g1] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp3-g0] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp3-g1] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp4-g0] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp4-g1] FAILED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp0-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp0-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp1-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp2-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp3-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp3-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp4-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp4-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp0-g0] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp0-g1] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp1-g0] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp1-g1] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp2-g0] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp2-g1] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp3-g0] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp3-g1] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp4-g0] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp4-g1] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp0-g0] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp0-g1] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp1-g0] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp1-g1] Client [1090] waits on 172.17.0.3:39697 Machine (0) group (0) client (0) connect to server successfully! Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting...
FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp2-g0] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp2-g1] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp3-g0] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp3-g1] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp4-g0] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp4-g1] Server (1) shutdown. Server is exiting... FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp0-g0] FAILED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp0-g1] FAILED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp1-g0] FAILED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp1-g1] PASSED [ 29%] tests/distributed/test_distributed_sampling.py::test_standalone_etype_sampling NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.002 seconds, peak memory: 1.529 GB There are 21112 edges in the graph and 0 edge cuts for 1 partitions. Client[-1] in group[-1] is exiting... NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.030 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 1 partitions. Client[-1] in group[-1] is exiting... NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Calculate edge assignment: 0.000 seconds FAILED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp2-g0] FAILED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp2-g1] FAILED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp3-g0] FAILED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp3-g1] Save partitions: 0.026 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 1 partitions. Client[-1] in group[-1] is exiting... PASSED [ 30%] tests/distributed/test_mp_dataloader.py::test_standalone NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. torch.int64 Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Save partitions: 0.015 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 1 partitions. NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. 
FAILED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp4-g0] FAILED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp4-g1] FAILED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp0-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp2-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp3-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp3-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp4-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp4-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp0-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp2-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp3-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp4-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp4-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp0-g0] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp0-g1] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Client[-1] in group[-1] is exiting... PASSED [ 32%] tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-True-True-0-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. torch.int64 Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:33:21] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.002 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.003s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.003 seconds Splitting the graph into partitions takes 0.006s, peak mem: 1.529 GB part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.017 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. 
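test_dist_dataloader above batches seed nodes over the 3-part METIS partition and samples multi-hop blocks for each batch. A rough sketch of that pattern, assuming the dgl.dataloading.DistNodeDataLoader wrapper; the fanouts, batch size, and seed ids are illustrative:

    import torch
    import dgl
    import dgl.distributed

    dgl.distributed.initialize('ip_config.txt')
    g = dgl.distributed.DistGraph('test_sampling')

    sampler = dgl.dataloading.NeighborSampler([5, 10])   # 2-hop neighbor sampling
    loader = dgl.dataloading.DistNodeDataLoader(
        g, torch.arange(100), sampler, batch_size=32, shuffle=True)

    for input_nodes, output_nodes, blocks in loader:
        pass   # the training step would consume `blocks` here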
FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp1-g0] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp1-g1] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp2-g0] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp2-g1] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp3-g0] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp3-g1] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g0] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g1] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g0] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g1] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp1-g0] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp1-g1] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp2-g0] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp2-g1] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp3-g0] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp3-g1] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp4-g0] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp4-g1] FAILED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp0-g0] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp0-g1] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp1-g0] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp1-g1] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:33:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:23848]... 
FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp2-g0] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp2-g1] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp3-g0] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp3-g1] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp4-g0] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp4-g1] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp0-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp1-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp1-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp2-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp3-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp3-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp4-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp4-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp0-g0] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp0-g1] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp1-g0] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:33:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:23851]... 
FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp1-g1] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp2-g0] FAILED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp2-g1] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp3-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp3-g1] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp4-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp4-g1] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp0-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp0-g1] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp1-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp1-g1] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp2-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp2-g1] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp3-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp3-g1] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp4-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp4-g1] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp0-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp0-g1] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp1-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp1-g1] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:33:24] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:24] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:23854]... 
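The "start graph service on server N for part N" lines show one server process per partition coming up and then waiting for client connections. A rough sketch of both sides, assuming the DistGraphServer/DistGraph API of this DGL generation (the ip_config.txt path and the server/client counts are hypothetical):

    import dgl

    # Server side: one process per partition serves its part file.
    serv = dgl.distributed.DistGraphServer(
        server_id=0, ip_config='ip_config.txt',
        num_servers=1, num_clients=1,
        part_config='/tmp/test_sampling/test_sampling.json')
    serv.start()  # blocks; logs "Server is waiting for connections on [...]"

    # Client side: join the server group, then open the graph by name.
    dgl.distributed.initialize('ip_config.txt')
    g = dgl.distributed.DistGraph('test_sampling')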
FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp2-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp2-g1] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp3-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp3-g1] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp4-g0] FAILED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp4-g1] FAILED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp2-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp2-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp3-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp3-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp4-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp2-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp2-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp3-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp3-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp4-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp0-g0] FAILED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp0-g1] FAILED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp1-g0] FAILED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp1-g1] FAILED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp2-g0] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp2-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp3-g0] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp3-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp4-g0] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp4-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp0-g0] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp0-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp1-g0] [05:33:25] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:25] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
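The test_sddmm ids above encode the whole SDDMM parameter grid: index dtype, the op (add/sub/mul/div/dot/copy_lhs/copy_rhs), the lhs/rhs operand targets (u = source node, v = destination node, e = edge), a feature shape, and a test graph (g0/g1). A sketch of the op under test via the generic dgl.ops.gsddmm entry point (graph and shapes are illustrative):

    import dgl
    import torch

    g = dgl.rand_graph(30, 100)
    u_feat = torch.randn(g.num_nodes(), 8)  # lives on source nodes ('u')
    v_feat = torch.randn(g.num_nodes(), 8)  # lives on destination nodes ('v')

    # 'dot-u-v': per-edge dot product of source and destination features.
    e_dot = dgl.ops.gsddmm(g, 'dot', u_feat, v_feat,
                           lhs_target='u', rhs_target='v')

    # 'copy_lhs-u-v': per-edge copy of the lhs operand; rhs is unused.
    e_cp = dgl.ops.gsddmm(g, 'copy_lhs', u_feat, None,
                          lhs_target='u', rhs_target='v')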
FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp1-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp2-g0] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp2-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp3-g0] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp3-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp4-g0] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp4-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp0-g0] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp0-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp1-g0] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp1-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp2-g0] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp2-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp3-g0] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp3-g1] FAILED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp4-g0] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp4-g1] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp0-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp1-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp2-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp2-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp3-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp3-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp0-g0] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp0-g1] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp1-g0] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp1-g1] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp2-g0] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp2-g1] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp3-g0] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp3-g1] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp4-g0] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp4-g1] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp0-g0] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp0-g1] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp1-g0] FAILED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp1-g1] FAILED [ 87%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp2-g0] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp2-g1] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp3-g0] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp3-g1] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp4-g0] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp4-g1] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp0-g0] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp0-g1] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp1-g0] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp1-g1] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp2-g0] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp2-g1] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp3-g0] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp3-g1] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp4-g0] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp4-g1] FAILED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp0-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp0-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp1-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp1-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp1-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp2-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp2-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp3-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp3-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp0-g0] FAILED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp0-g1] FAILED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g0] FAILED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g1] FAILED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp2-g0] FAILED [ 88%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp2-g1] FAILED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp3-g0] FAILED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp3-g1] FAILED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp4-g0] Client [1719] waits on 172.17.0.3:55355 Machine (0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. FAILED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp4-g1] FAILED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g0] FAILED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g1] FAILED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g0] Client[0] in group[0] is exiting... Server (1) shutdown. Server is exiting... Server (0) shutdown. Server is exiting... FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g1] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g0] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g1] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp3-g0] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp3-g1] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g0] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g1] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g0] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g1] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g0] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g1] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g0] Server (2) shutdown. Server is exiting... FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g1] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g0] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g1] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g0] FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g1] PASSED [ 34%] tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-True-True-4-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files.
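test_dist_dataloader above drives sampling through the servers started earlier. A hedged sketch of a distributed node dataloader, assuming the dataloading API of this DGL generation (the seed nodes, fan-outs, and ip_config path are placeholders):

    import dgl
    import torch

    dgl.distributed.initialize('ip_config.txt')
    g = dgl.distributed.DistGraph('test_sampling')

    train_nid = torch.arange(140)  # e.g. the 140 training samples logged above
    sampler = dgl.dataloading.MultiLayerNeighborSampler([5, 10])

    # Batches seed nodes and samples message-flow blocks from the
    # partitioned graph via the running servers.
    loader = dgl.dataloading.DistNodeDataLoader(
        g, train_nid, sampler, batch_size=32, shuffle=True)
    for input_nodes, seeds, blocks in loader:
        pass  # forward/backward would go here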
torch.int64 Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:33:30] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.002 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.003s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.003 seconds Splitting the graph into partitions takes 0.007s, peak mem: 1.529 GB part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.017 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. FAILED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp0-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp0-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g0] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g1] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g0] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g1] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g0] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g1] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g0] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g1] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g0] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g1] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g0] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g1] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g0] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g1] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g0] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. 
Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:33:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21149]... FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g1] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g0] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g1] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g0] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g1] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g0] FAILED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g1] FAILED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g0] FAILED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g1] FAILED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g0] FAILED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g1] FAILED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g0] FAILED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g1] FAILED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g0] FAILED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g1] FAILED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp0-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp0-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp1-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp1-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp2-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp2-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp3-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp3-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp4-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp4-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-src-g0] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:33:32] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:32] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21152]... 
FAILED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-dst-g0] FAILED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-src-g0] FAILED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-dst-g0] FAILED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-src-g0] FAILED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-dst-g0] FAILED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-src-g0] FAILED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-dst-g0] FAILED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-src-g0] FAILED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-dst-g0] FAILED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-src-g0] FAILED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-dst-g0] FAILED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[sum] FAILED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[max] FAILED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[min] FAILED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[mean] FAILED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-idtype0] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-idtype1] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-idtype0] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-idtype1] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-idtype0] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-idtype1] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-idtype0] FAILED [ 93%] 
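test_edge_softmax parametrizes over 'src'/'dst': an edge score can be softmax-normalized across each node's outgoing or incoming edges. Illustrative usage, assuming dgl.nn.functional.edge_softmax with its norm_by argument (graph and score shapes are arbitrary):

    import dgl
    import torch
    from dgl.nn.functional import edge_softmax

    g = dgl.rand_graph(30, 100)
    score = torch.randn(g.num_edges(), 1)

    a_dst = edge_softmax(g, score, norm_by='dst')  # over incoming edges
    a_src = edge_softmax(g, score, norm_by='src')  # over outgoing edges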
tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-idtype1] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-idtype0] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-idtype1] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-idtype0] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-idtype1] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-idtype0] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-idtype1] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-idtype0] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-idtype1] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-idtype0] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-idtype1] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:33:33] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:33] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21155]... FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-idtype0] FAILED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-idtype1] FAILED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-1] FAILED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-8] FAILED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-16] FAILED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-64] FAILED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-256] FAILED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-1] FAILED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-8] FAILED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-16] FAILED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-64] FAILED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-256] FAILED [ 94%] tests/compute/test_sparse.py::test_use_libxsmm_switch FAILED [ 94%] tests/compute/test_specialization.py::test_v2v_update_all[idtype0] FAILED [ 94%] 
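test_segment_mm and test_gather_mm_idx_b cover batched matrix multiplies in which rows of a dense matrix pick their weight matrix by segment or by index. Reference semantics in plain torch (a sketch of what the fused dgl.ops kernels compute, not their implementation):

    import torch

    def segment_mm_ref(a, b, seglen_a):
        # a: (N, D1); b: (num_segs, D1, D2); seglen_a: rows per segment.
        # Segment i of `a` is multiplied by its own weight matrix b[i].
        segs = torch.split(a, seglen_a)
        return torch.cat([seg @ b[i] for i, seg in enumerate(segs)])

    def gather_mm_ref(a, b, idx_b):
        # Row j of `a` is multiplied by b[idx_b[j]].
        return torch.bmm(a.unsqueeze(1), b[idx_b]).squeeze(1)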
tests/compute/test_specialization.py::test_v2v_update_all[idtype1] FAILED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[idtype0] [05:33:34] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:34] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. FAILED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[idtype1] FAILED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[idtype0] FAILED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[idtype1] FAILED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype0] FAILED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype1] [05:33:35] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:35] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:33:35] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:35] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:33:35] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:35] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:33:35] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:35] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. FAILED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[idtype0] FAILED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[idtype1] FAILED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[idtype0] FAILED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[idtype1] FAILED [ 95%] tests/compute/test_subgraph.py::test_edge_subgraph PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph1[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph1[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_in_subgraph[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_in_subgraph[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_message_passing PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype1] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype1] Client [1756] waits on 172.17.0.3:58147 Client [1755] waits on 172.17.0.3:52765 Client [1753] waits on 172.17.0.3:32967 Client [1754] waits on 172.17.0.3:38817 Client [1749] waits on 172.17.0.3:43513 Machine (0) group (0) client (1) connect to server successfully! Machine (0) group (0) client (0) connect to server successfully! Machine (0) group (0) client (3) connect to server successfully! Machine (0) group (0) client (2) connect to server successfully! Machine (0) group (0) client (4) connect to server successfully!
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. PASSED [ 95%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device1] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device2] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device3] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device1] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device2] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device3] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype0-device0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype0-device1] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype1-device0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype1-device1] SKIPPED [ 96%] tests/compute/test_transform.py::test_line_graph1 PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[idtype0] PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[idtype1] PASSED [ 96%] tests/compute/test_transform.py::test_no_backtracking PASSED [ 96%] tests/compute/test_transform.py::test_reverse[idtype0] PASSED [ 96%] tests/compute/test_transform.py::test_reverse[idtype1] PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[idtype0] Client[2] in group[0] is exiting... Client[0] in group[0] is exiting... PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[idtype1] PASSED [ 96%] tests/compute/test_transform.py::test_to_bidirected PASSED [ 96%] tests/compute/test_transform.py::test_add_reverse_edges PASSED [ 96%] tests/compute/test_transform.py::test_simple_graph PASSED [ 96%] tests/compute/test_transform.py::test_khop_graph Client[1] in group[0] is exiting... FAILED [ 96%] tests/compute/test_transform.py::test_khop_adj FAILED [ 96%] tests/compute/test_transform.py::test_laplacian_lambda_max PASSED [ 97%] tests/compute/test_transform.py::test_partition_with_halo Client[3] in group[0] is exiting... PASSED [ 97%] tests/compute/test_transform.py::test_metis_partition[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_metis_partition[idtype1] Client[4] in group[0] is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... Server (2) shutdown. Server is exiting... PASSED [ 36%] tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-True-False-0-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. 
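test_khop_graph and test_khop_adj above expose k-hop connectivity in two forms, a graph and a dense matrix. Illustrative usage (graph size is arbitrary):

    import dgl

    g = dgl.rand_graph(10, 30)
    g3 = dgl.khop_graph(g, 3)  # one edge per length-3 path in g
    A3 = dgl.khop_adj(g, 3)    # dense equivalent: adjacency matrix cubed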
torch.int64 Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB PASSED [ 97%] tests/compute/test_transform.py::test_reorder_nodes [05:33:42] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.003 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.004 seconds Splitting the graph into partitions takes 0.008s, peak mem: 1.529 GB part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.017 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. PASSED [ 97%] tests/compute/test_transform.py::test_compact[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_compact[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_to_simple[idtype0] FAILED [ 97%] tests/compute/test_transform.py::test_to_simple[idtype1] FAILED [ 97%] tests/compute/test_transform.py::test_to_block[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_to_block[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_edges[idtype0] FAILED [ 97%] tests/compute/test_transform.py::test_remove_edges[idtype1] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:33:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:19105]... FAILED [ 97%] tests/compute/test_transform.py::test_add_edges[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_add_edges[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_nodes[idtype0] FAILED [ 97%] tests/compute/test_transform.py::test_remove_nodes[idtype1] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:33:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:19108]... 
FAILED [ 97%] tests/compute/test_transform.py::test_add_selfloop[idtype0] FAILED [ 97%] tests/compute/test_transform.py::test_add_selfloop[idtype1] FAILED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[idtype0] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:33:45] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:45] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:19111]... FAILED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[idtype1] FAILED [ 97%] tests/compute/test_transform.py::test_reorder_graph[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_norm_by_dst[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_norm_by_dst[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_self_loop[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_self_loop[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_to_simple[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_to_simple[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_line_graph[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_line_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_khop_graph[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_khop_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_gcnnorm[idtype0] [05:33:46] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:46] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
FAILED [ 98%] tests/compute/test_transform.py::test_module_gcnnorm[idtype1] FAILED [ 98%] tests/compute/test_transform.py::test_module_ppr[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_ppr[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_gdc[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_gdc[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_node_shuffle[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_node_shuffle[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_drop_node[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_drop_node[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_drop_edge[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_drop_edge[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_add_edge[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_add_edge[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_sign[g0] FAILED [ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[idtype1] PASSED [ 99%] tests/compute/test_traversal.py::test_bfs[idtype0] PASSED [ 99%] tests/compute/test_traversal.py::test_bfs[idtype1] PASSED [ 99%] tests/compute/test_traversal.py::test_topological_nodes[idtype0] PASSED [ 99%] tests/compute/test_traversal.py::test_topological_nodes[idtype1] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[idtype0] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[idtype1] PASSED [100%] =================================== FAILURES =================================== __________________________ test_unary_copy_u[idtype0] __________________________ idtype = torch.int32 @parametrize_idtype def test_unary_copy_u(idtype): def _test(mfunc): g = create_test_heterograph(idtype) x1 = F.randn((g.num_nodes('user'), feat_size)) x2 = F.randn((g.num_nodes('developer'), feat_size)) F.attach_grad(x1) F.attach_grad(x2) g.nodes['user'].data['h'] = x1 g.nodes['developer'].data['h'] = x2 ################################################################# # apply_edges() is called on each relation type separately ################################################################# with F.record_grad(): [g.apply_edges(fn.copy_u('h', 'm'), etype = rel) for rel in g.canonical_etypes] r1 = g['plays'].edata['m'] F.backward(r1, F.ones(r1.shape)) n_grad1 = F.grad(g.ndata['h']['user']) # TODO (Israt): clear not working g.edata['m'].clear() ################################################################# # apply_edges() is called on all relation types ################################################################# g.apply_edges(fn.copy_u('h', 'm')) r2 = g['plays'].edata['m'] F.backward(r2, F.ones(r2.shape)) 
n_grad2 = F.grad(g.nodes['user'].data['h']) # correctness check def _print_error(a, b): for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())): if not np.allclose(x, y): print('@{} {} v.s. {}'.format(i, x, y)) if not F.allclose(r1, r2): _print_error(r1, r2) assert F.allclose(r1, r2) if not F.allclose(n_grad1, n_grad2): print('node grad') _print_error(n_grad1, n_grad2) assert(F.allclose(n_grad1, n_grad2)) > _test(fn.copy_u) tests/compute/test_apply_edges_hetero.py:92: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/compute/test_apply_edges_hetero.py:62: in _test for rel in g.canonical_etypes] tests/compute/test_apply_edges_hetero.py:62: in for rel in g.canonical_etypes] python/dgl/heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python/dgl/core.py:276: in invoke_gsddmm z = op(graph, x) python/dgl/ops/sddmm.py:164: in copy_u return gsddmm(g, 'copy_lhs', x, None) python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[-0.8083, 2.2130], [-1.3532, -0.4294]], requires_grad=True) rhs_data = None, lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError __________________________ test_unary_copy_u[idtype1] __________________________ idtype = torch.int64 @parametrize_idtype def test_unary_copy_u(idtype): def _test(mfunc): g = create_test_heterograph(idtype) x1 = F.randn((g.num_nodes('user'), feat_size)) x2 = F.randn((g.num_nodes('developer'), feat_size)) F.attach_grad(x1) F.attach_grad(x2) g.nodes['user'].data['h'] = x1 g.nodes['developer'].data['h'] = x2 ################################################################# # apply_edges() is called on each relation type separately ################################################################# with F.record_grad(): [g.apply_edges(fn.copy_u('h', 'm'), etype = rel) for rel in g.canonical_etypes] r1 = g['plays'].edata['m'] F.backward(r1, F.ones(r1.shape)) n_grad1 = F.grad(g.ndata['h']['user']) # TODO (Israt): clear not working g.edata['m'].clear() ################################################################# # apply_edges() is called on all relation types ################################################################# g.apply_edges(fn.copy_u('h', 'm')) r2 = g['plays'].edata['m'] F.backward(r2, F.ones(r2.shape)) n_grad2 = F.grad(g.nodes['user'].data['h']) # correctness check def _print_error(a, b): for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())): if not np.allclose(x, y): print('@{} {} v.s. 
{}'.format(i, x, y)) if not F.allclose(r1, r2): _print_error(r1, r2) assert F.allclose(r1, r2) if not F.allclose(n_grad1, n_grad2): print('node grad') _print_error(n_grad1, n_grad2) assert(F.allclose(n_grad1, n_grad2)) > _test(fn.copy_u) tests/compute/test_apply_edges_hetero.py:92: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/compute/test_apply_edges_hetero.py:62: in _test for rel in g.canonical_etypes] tests/compute/test_apply_edges_hetero.py:62: in for rel in g.canonical_etypes] python/dgl/heterograph.py:4458: in apply_edges edata = core.invoke_gsddmm(g, func) python/dgl/core.py:276: in invoke_gsddmm z = op(graph, x) python/dgl/ops/sddmm.py:164: in copy_u return gsddmm(g, 'copy_lhs', x, None) python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[ 0.7500, 0.1746], [-1.8386, -3.9195]], requires_grad=True) rhs_data = None, lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ___________________________ test_binary_op[idtype0] ____________________________ idtype = torch.int32 @parametrize_idtype def test_binary_op(idtype): def _test(lhs, rhs, binary_op): g = create_test_heterograph(idtype) n1 = F.randn((g.num_nodes('user'), feat_size)) n2 = F.randn((g.num_nodes('developer'), feat_size)) n3 = F.randn((g.num_nodes('game'), feat_size)) x1 = F.randn((g.num_edges('plays'),feat_size)) x2 = F.randn((g.num_edges('follows'),feat_size)) x3 = F.randn((g.num_edges('develops'),feat_size)) x4 = F.randn((g.num_edges('wishes'),feat_size)) builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs) builtin_msg = getattr(fn, builtin_msg_name) ################################################################# # apply_edges() is called on each relation type separately ################################################################# F.attach_grad(n1) F.attach_grad(n2) F.attach_grad(n3) g.nodes['user'].data['h'] = n1 g.nodes['developer'].data['h'] = n2 g.nodes['game'].data['h'] = n3 F.attach_grad(x1) F.attach_grad(x2) F.attach_grad(x3) F.attach_grad(x4) g['plays'].edata['h'] = x1 g['follows'].edata['h'] = x2 g['develops'].edata['h'] = x3 g['wishes'].edata['h'] = x4 with F.record_grad(): [g.apply_edges(builtin_msg('h', 'h', 'm'), etype = rel) for rel in g.canonical_etypes] r1 = g['plays'].edata['m'] loss = F.sum(r1.view(-1), 0) F.backward(loss) n_grad1 = F.grad(g.nodes['game'].data['h']) ################################################################# # apply_edges() is called on all relation types ################################################################# F.attach_grad(n1) F.attach_grad(n2) F.attach_grad(n3) g.nodes['user'].data['h'] = n1 g.nodes['developer'].data['h'] = n2 g.nodes['game'].data['h'] = n3 F.attach_grad(x1) F.attach_grad(x2) F.attach_grad(x3) F.attach_grad(x4) g['plays'].edata['h'] = x1 g['follows'].edata['h'] = x2 g['develops'].edata['h'] = x3 g['wishes'].edata['h'] = x4 with F.record_grad(): g.apply_edges(builtin_msg('h', 'h', 'm')) r2 = g['plays'].edata['m'] loss = F.sum(r2.view(-1), 0) 
___________________________ test_binary_op[idtype0] ____________________________

idtype = torch.int32

    @parametrize_idtype
    def test_binary_op(idtype):
        def _test(lhs, rhs, binary_op):
            g = create_test_heterograph(idtype)
            n1 = F.randn((g.num_nodes('user'), feat_size))
            n2 = F.randn((g.num_nodes('developer'), feat_size))
            n3 = F.randn((g.num_nodes('game'), feat_size))
            x1 = F.randn((g.num_edges('plays'),feat_size))
            x2 = F.randn((g.num_edges('follows'),feat_size))
            x3 = F.randn((g.num_edges('develops'),feat_size))
            x4 = F.randn((g.num_edges('wishes'),feat_size))
            builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs)
            builtin_msg = getattr(fn, builtin_msg_name)

            #################################################################
            # apply_edges() is called on each relation type separately
            #################################################################
            F.attach_grad(n1)
            F.attach_grad(n2)
            F.attach_grad(n3)
            g.nodes['user'].data['h'] = n1
            g.nodes['developer'].data['h'] = n2
            g.nodes['game'].data['h'] = n3
            F.attach_grad(x1)
            F.attach_grad(x2)
            F.attach_grad(x3)
            F.attach_grad(x4)
            g['plays'].edata['h'] = x1
            g['follows'].edata['h'] = x2
            g['develops'].edata['h'] = x3
            g['wishes'].edata['h'] = x4
            with F.record_grad():
                [g.apply_edges(builtin_msg('h', 'h', 'm'), etype = rel)
                 for rel in g.canonical_etypes]
                r1 = g['plays'].edata['m']
                loss = F.sum(r1.view(-1), 0)
                F.backward(loss)
                n_grad1 = F.grad(g.nodes['game'].data['h'])

            #################################################################
            # apply_edges() is called on all relation types
            #################################################################
            F.attach_grad(n1)
            F.attach_grad(n2)
            F.attach_grad(n3)
            g.nodes['user'].data['h'] = n1
            g.nodes['developer'].data['h'] = n2
            g.nodes['game'].data['h'] = n3
            F.attach_grad(x1)
            F.attach_grad(x2)
            F.attach_grad(x3)
            F.attach_grad(x4)
            g['plays'].edata['h'] = x1
            g['follows'].edata['h'] = x2
            g['develops'].edata['h'] = x3
            g['wishes'].edata['h'] = x4
            with F.record_grad():
                g.apply_edges(builtin_msg('h', 'h', 'm'))
                r2 = g['plays'].edata['m']
                loss = F.sum(r2.view(-1), 0)
                F.backward(loss)
                n_grad2 = F.grad(g.nodes['game'].data['h'])

            # correctness check
            def _print_error(a, b):
                for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(),
                                               F.asnumpy(b).flatten())):
                    if not np.allclose(x, y):
                        print('@{} {} v.s. {}'.format(i, x, y))
            if not F.allclose(r1, r2):
                _print_error(r1, r2)
            assert F.allclose(r1, r2)
            if n_grad1 is not None or n_grad2 is not None:
                if not F.allclose(n_grad1, n_grad2):
                    print('node grad')
                    _print_error(n_grad1, n_grad2)
                    assert(F.allclose(n_grad1, n_grad2))

        target = ["u", "v", "e"]
        for lhs, rhs in product(target, target):
            if lhs == rhs:
                continue
            for binary_op in ["add", "sub", "mul", "div", "dot"]:
                print(lhs, rhs, binary_op)
>               _test(lhs, rhs, binary_op)

tests/compute/test_apply_edges_hetero.py:242:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_apply_edges_hetero.py:190: in _test
    for rel in g.canonical_etypes]
tests/compute/test_apply_edges_hetero.py:190: in <listcomp>
    for rel in g.canonical_etypes]
python/dgl/heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python/dgl/core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python/dgl/ops/sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'add'
lhs_data = tensor([[-1.9796, -0.1490],
        [-1.0488, -0.7427]], requires_grad=True)
rhs_data = tensor([[-0.7523, -0.9474],
        [ 0.0539, -0.9849]], requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    [gsddmm body identical to the earlier failure]

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
u v add
___________________________ test_binary_op[idtype1] ____________________________

idtype = torch.int64

    [test source and intermediate frames identical to the idtype0 failure above]

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'add'
lhs_data = tensor([[-0.4311, -1.4427],
        [-1.3631, -0.8587]], requires_grad=True)
rhs_data = tensor([[-0.3154, -0.5503],
        [-0.4826, -0.9824]], requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
u v add
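The two test_binary_op failures above build DGL's builtin message functions by name. A short illustration of that lookup, mirroring the test body (`fn` is dgl.function, as in the tests):

    import dgl.function as fn

    lhs, binary_op, rhs = 'u', 'add', 'v'
    builtin_msg = getattr(fn, "{}_{}_{}".format(lhs, binary_op, rhs))  # fn.u_add_v
    msg = builtin_msg('h', 'h', 'm')  # read 'h' on src and dst, write edge field 'm'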
___________________________ test_issue_1088[idtype0] ___________________________

idtype = torch.int32

    @parametrize_idtype
    def test_issue_1088(idtype):
        # This test ensures that message passing on a heterograph with one edge type
        # would not crash (GitHub issue #1088).
        import dgl.function as fn
        g = dgl.heterograph({('U', 'E', 'V'): ([0, 1, 2], [1, 2, 3])},
                            idtype=idtype, device=F.ctx())
        g.nodes['U'].data['x'] = F.randn((3, 3))
>       g.update_all(fn.copy_u('x', 'm'), fn.sum('m', 'y'))

tests/compute/test_basics.py:646:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.5470, -1.6287, -1.2363],
        [-1.1114,  1.4881,  0.0528],
        [ 0.9083, -0.6497,  0.6860]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
___________________________ test_issue_1088[idtype1] ___________________________

idtype = torch.int64

    [test source and intermediate frames identical to the idtype0 failure above]

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.4199, -0.0981,  2.1831],
        [ 2.6065, -0.8216, -0.6848],
        [-0.1195, -1.4392, -0.7790]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
___________________________ test_issue_2484[idtype0] ___________________________

idtype = torch.int32

    @parametrize_idtype
    def test_issue_2484(idtype):
        import dgl.function as fn
        g = dgl.graph(([0, 1, 2], [1, 2, 3]), idtype=idtype, device=F.ctx())
        x = F.copy_to(F.randn((4,)), F.ctx())
        g.ndata['x'] = x
>       g.pull([2, 1], fn.u_add_v('x', 'x', 'm'), fn.sum('m', 'x'))

tests/compute/test_basics.py:667:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/heterograph.py:4711: in pull
    compute_graph, message_func, reduce_func, apply_node_func)
python/dgl/core.py:362: in message_passing
    msgdata = invoke_gsddmm(g, mfunc)
python/dgl/core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python/dgl/ops/sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'add', lhs_data = tensor([ 0.0074, -1.3295])
rhs_data = tensor([-1.3295,  1.4919]), lhs_target = 'u', rhs_target = 'v'

    [gsddmm body identical to the earlier failures]

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
___________________________ test_issue_2484[idtype1] ___________________________

idtype = torch.int64

    [test source and intermediate frames identical to the idtype0 failure above]

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'add', lhs_data = tensor([ 1.0018, -0.3992])
rhs_data = tensor([-0.3992, -0.2018]), lhs_target = 'u', rhs_target = 'v'

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
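The test_issue_2484 failures reduce to a very small reproducer; a sketch distilled from the test source above, using plain torch in place of the backend wrapper F (so the exact tensor values differ):

    import torch
    import dgl
    import dgl.function as fn

    g = dgl.graph(([0, 1, 2], [1, 2, 3]))
    g.ndata['x'] = torch.randn(4)
    # On this build the call below dies in gsddmm with:
    # TypeError: empty_context() got an unexpected keyword argument 'enabled'
    g.pull([2, 1], fn.u_add_v('x', 'x', 'm'), fn.sum('m', 'x'))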
_______________________ test_edge_softmax_unidirectional _______________________

    @unittest.skipIf(dgl.backend.backend_name != 'pytorch',
                     reason='Only support PyTorch for now')
    def test_edge_softmax_unidirectional():
        g = dgl.heterograph({
            ('A', 'AB', 'B'): ([1,2,3,1,2,3,1,2,3], [0,0,0,1,1,1,2,2,2]),
            ('B', 'BB', 'B'): ([0,1,2,0,1,2,0,1,2], [0,0,0,1,1,1,2,2,2])})
        g = g.to(F.ctx())
        g.edges['AB'].data['x'] = F.ones(9) * 2
        g.edges['BB'].data['x'] = F.ones(9)
>       result = dgl.ops.edge_softmax(g, {'AB': g.edges['AB'].data['x'],
                                          'BB': g.edges['BB'].data['x']})

tests/compute/test_edge_softmax_hetero.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/edge_softmax.py:144: in edge_softmax
    eids, norm_by, *logits_tuple)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
eids = '__ALL__', norm_by = 'dst'
logits = (tensor([2., 2., 2., 2., 2., 2., 2., 2., 2.]),
          tensor([1., 1., 1., 1., 1., 1., 1., 1., 1.]))
args = (<dgl.heterograph_index.HeteroGraphIndex object at 0x...>, '__ALL__',
        'dst', tensor([2., 2., 2., 2., 2., 2., 2., 2., 2.]),
        tensor([1., 1., 1., 1., 1., 1., 1., 1., 1.]))

    def edge_softmax_hetero(gidx, eids=ALL, norm_by='dst', *logits):
        args = _cast_if_autocast_enabled(gidx, eids, norm_by, *logits)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:775: TypeError
______________________ test_edge_softmax[idtype0-src-g0] _______________________

g = Graph(num_nodes={'developer': 2, 'game': 2, 'user': 3},
      num_edges={('developer', 'develops', 'game'): 3, ('user'...('developer', 'game', 'develops'), ('user', 'user', 'follows'), ('user', 'game', 'plays'), ('user', 'game', 'wishes')])
norm_by = 'src', idtype = torch.int32

    @unittest.skipIf(dgl.backend.backend_name != 'pytorch',
                     reason='Only support PyTorch for now')
    @pytest.mark.parametrize('g', get_cases(['clique']))
    @pytest.mark.parametrize('norm_by', ['src', 'dst'])
    # @pytest.mark.parametrize('shp', edge_softmax_shapes)
    @parametrize_idtype
    def test_edge_softmax(g, norm_by, idtype):
        print("params", norm_by, idtype)
        g = create_test_heterograph(idtype)
        x1 = F.randn((g.num_edges('plays'),feat_size))
        x2 = F.randn((g.num_edges('follows'),feat_size))
        x3 = F.randn((g.num_edges('develops'),feat_size))
        x4 = F.randn((g.num_edges('wishes'),feat_size))
        F.attach_grad(F.clone(x1))
        F.attach_grad(F.clone(x2))
        F.attach_grad(F.clone(x3))
        F.attach_grad(F.clone(x4))
        g['plays'].edata['eid'] = x1
        g['follows'].edata['eid'] = x2
        g['develops'].edata['eid'] = x3
        g['wishes'].edata['eid'] = x4

        #################################################################
        # edge_softmax() on homogeneous graph
        #################################################################
        with F.record_grad():
            hm_g = dgl.to_homogeneous(g)
            hm_x = F.cat((x3, x2, x1, x4), 0)
            hm_e = F.attach_grad(F.clone(hm_x))
>           score_hm = edge_softmax(hm_g, hm_e, norm_by=norm_by)

tests/compute/test_edge_softmax_hetero.py:91:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/edge_softmax.py:135: in edge_softmax
    eids=eids, norm_by=norm_by)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
logits = tensor([[-1.4336,  1.5559],
        [-0.3350,  0.0120],
        [ 1.5648,  0.8156],
        [-1.1123,  1.5411],
        ...057,  0.2814],
        [-0.4346,  0.2468],
        [-0.0544,  0.4573],
        [-0.6307, -1.2937]], requires_grad=True)
eids = '__ALL__', norm_by = 'src'

    def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'):
        args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:770: TypeError
----------------------------- Captured stdout call -----------------------------
params src torch.int32
______________________ test_edge_softmax[idtype0-dst-g0] _______________________

g = Graph(num_nodes={'developer': 2, 'game': 2, 'user': 3},
      num_edges={('developer', 'develops', 'game'): 3, ('user'...('developer', 'game', 'develops'), ('user', 'user', 'follows'), ('user', 'game', 'plays'), ('user', 'game', 'wishes')])
norm_by = 'dst', idtype = torch.int32

    [test source and intermediate frames identical to the idtype0-src-g0 failure above]

logits = tensor([[ 0.4611, -1.6093],
        [ 0.0317,  0.8821],
        [ 1.5017, -0.5792],
        [ 1.9286,  1.7655],
        ...343,  0.1096],
        [ 0.5236, -0.1736],
        [ 0.6223, -0.6720],
        [-1.5531,  0.9349]], requires_grad=True)
eids = '__ALL__', norm_by = 'dst'

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:770: TypeError
----------------------------- Captured stdout call -----------------------------
params dst torch.int32
______________________ test_edge_softmax[idtype1-src-g0] _______________________

g = Graph(num_nodes={'developer': 2, 'game': 2, 'user': 3},
      num_edges={('developer', 'develops', 'game'): 3, ('user'...('developer', 'game', 'develops'), ('user', 'user', 'follows'), ('user', 'game', 'plays'), ('user', 'game', 'wishes')])
norm_by = 'src', idtype = torch.int64

    [test source and intermediate frames identical to the idtype0-src-g0 failure above]

logits = tensor([[-3.0921e-01,  8.0471e-01],
        [ 1.6534e+00, -3.8585e-01],
        [-9.6037e-02,  1.2391e+00],
        [ ... 1.3756e-03,  1.1694e+00],
        [ 3.5168e-01, -1.3745e+00],
        [ 1.9409e-01,  1.4674e+00]], requires_grad=True)
eids = '__ALL__', norm_by = 'src'

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:770: TypeError
----------------------------- Captured stdout call -----------------------------
params src torch.int64
______________________ test_edge_softmax[idtype1-dst-g0] _______________________

g = Graph(num_nodes={'developer': 2, 'game': 2, 'user': 3},
      num_edges={('developer', 'develops', 'game'): 3, ('user'...('developer', 'game', 'develops'), ('user', 'user', 'follows'), ('user', 'game', 'plays'), ('user', 'game', 'wishes')])
norm_by = 'dst', idtype = torch.int64

    [test source and intermediate frames identical to the idtype0-src-g0 failure above]

logits = tensor([[-0.1608,  0.2121],
        [-0.4321,  0.3399],
        [-0.0639, -0.9893],
        [ 0.6717,  0.0155],
        ...806, -0.8436],
        [ 0.5347,  0.4538],
        [-0.4843,  0.5824],
        [-2.2155, -0.7792]], requires_grad=True)
eids = '__ALL__', norm_by = 'dst'

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:770: TypeError
----------------------------- Captured stdout call -----------------------------
params dst torch.int64
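For reference, the quantity the failing edge_softmax tests try to compute is an ordinary softmax over each node's incident edges, grouped by destination (norm_by='dst') or source (norm_by='src'). In the unidirectional test each 'B' node receives three 'AB' edges with logit 2 and three 'BB' edges with logit 1, and the six incoming scores are normalized jointly; the per-group math in plain torch:

    import torch

    # Three 'AB' in-edges (logit 2) and three 'BB' in-edges (logit 1)
    # into one destination node, normalized together:
    logits = torch.tensor([2., 2., 2., 1., 1., 1.])
    print(torch.softmax(logits, dim=0))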
____________________________ test_updates[idtype0] _____________________________

idtype = torch.int32

    @parametrize_idtype
    def test_updates(idtype):
        def msg_func(edges):
            return {'m': edges.src['h']}
        def reduce_func(nodes):
            return {'y': F.sum(nodes.mailbox['m'], 1)}
        def apply_func(nodes):
            return {'y': nodes.data['y'] * 2}
        g = create_test_heterograph(idtype)
        x = F.randn((3, 5))
        g.nodes['user'].data['h'] = x
        for msg, red, apply in itertools.product(
                [fn.copy_u('h', 'm'), msg_func],
                [fn.sum('m', 'y'), reduce_func],
                [None, apply_func]):
            multiplier = 1 if apply is None else 2
>           g['user', 'plays', 'game'].update_all(msg, red, apply)

tests/compute/test_heterograph.py:1674:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.1249,  0.5763,  1.4886,  2.4238, -1.5383],
        [-1.4728, -0.9454,  0.7629, -1.1135, -0.2008],
        [-0.1803,  0.4783,  0.1011,  1.6278, -0.1912]])
rhs_data = None

    [gspmm body identical to the earlier failures]

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
____________________________ test_updates[idtype1] _____________________________

idtype = torch.int64

    [test source and intermediate frames identical to the idtype0 failure above]

op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.4010,  0.5922, -0.0199,  1.0543, -1.0379],
        [-1.5216, -0.4269,  0.1716,  1.6454,  0.7565],
        [ 0.9748,  1.6111,  1.3588,  0.0985, -0.3508]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
____________________________ test_backward[idtype0] ____________________________

idtype = torch.int32

    @parametrize_idtype
    def test_backward(idtype):
        g = create_test_heterograph(idtype)
        x = F.randn((3, 5))
        F.attach_grad(x)
        g.nodes['user'].data['h'] = x
        with F.record_grad():
            g.multi_update_all(
                {'plays' : (fn.copy_u('h', 'm'), fn.sum('m', 'y')),
                 'wishes': (fn.copy_u('h', 'm'), fn.sum('m', 'y'))},
>               'sum')

tests/compute/test_heterograph.py:1709:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/heterograph.py:5023: in multi_update_all
    all_out[dtid].append(core.message_passing(g, mfunc, rfunc, afunc))
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.6385, -1.2521,  0.4248, -2.3375, -0.6380],
        [ 0.0338,  1.4721, -0.7206, -0.1300,  0.5931],
        [ 0.7461,  1.6199, -0.7145, -1.2436, -0.7472]], requires_grad=True)
rhs_data = None

    [gspmm body identical to the earlier failures]

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
____________________________ test_backward[idtype1] ____________________________

idtype = torch.int64

    [test source and intermediate frames identical to the idtype0 failure above]

op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.4332, -1.3325,  0.0030,  0.7070, -1.5994],
        [-2.6495,  2.6074,  1.6508,  0.9886,  2.4109],
        [-0.1265,  0.5570, -1.8708,  0.1544, -0.5959]], requires_grad=True)
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
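test_backward exercises the multi_update_all entry point that the traceback runs through (heterograph.py:5023). A condensed, self-contained version of the failing call, assuming a small two-relation graph in place of create_test_heterograph:

    import torch
    import dgl
    import dgl.function as fn

    g = dgl.heterograph({
        ('user', 'plays', 'game'): ([0, 1, 2], [0, 0, 1]),
        ('user', 'wishes', 'game'): ([0, 2], [1, 0]),
    })
    g.nodes['user'].data['h'] = torch.randn(3, 5, requires_grad=True)
    # One (message, reduce) pair per relation, then a cross-type reducer:
    g.multi_update_all(
        {'plays':  (fn.copy_u('h', 'm'), fn.sum('m', 'y')),
         'wishes': (fn.copy_u('h', 'm'), fn.sum('m', 'y'))},
        'sum')
    g.nodes['game'].data['y'].sum().backward()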
_____________________________ test_copy_src_reduce _____________________________

    def test_copy_src_reduce():
        def _test(red, partial):
            g = dgl.DGLGraph(nx.erdos_renyi_graph(100, 0.1))
            # NOTE(zihao): add self-loop to avoid zero-degree nodes.
            # https://github.com/dmlc/dgl/issues/761
            g.add_edges(g.nodes(), g.nodes())
            g = g.to(F.ctx())
            hu, hv, he = generate_feature(g, 'none', 'none')
            if partial:
                nid = F.tensor(list(range(0, 100, 2)), g.idtype)
            g.ndata['u'] = F.attach_grad(F.clone(hu))
            g.ndata['v'] = F.attach_grad(F.clone(hv))
            g.edata['e'] = F.attach_grad(F.clone(he))
            with F.record_grad():
                if partial:
                    g.pull(nid, fn.copy_src(src='u', out='m'),
                           builtin[red](msg='m', out='r1'))
                else:
                    g.update_all(fn.copy_src(src='u', out='m'),
                                 builtin[red](msg='m', out='r1'))
                r1 = g.ndata['r1']
                F.backward(F.reduce_sum(r1))
                n_grad1 = F.grad(g.ndata['u'])
            # reset grad
            g.ndata['u'] = F.attach_grad(F.clone(hu))
            g.ndata['v'] = F.attach_grad(F.clone(hv))
            g.edata['e'] = F.attach_grad(F.clone(he))
            with F.record_grad():
                if partial:
                    g.pull(nid, udf_copy_src, udf_reduce[red])
                else:
                    g.update_all(udf_copy_src, udf_reduce[red])
                r2 = g.ndata['r2']
                F.backward(F.reduce_sum(r2))
                n_grad2 = F.grad(g.ndata['u'])
            def _print_error(a, b):
                print("ERROR: Test copy_src_{} partial: {}".
                      format(red, partial))
                for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(),
                                               F.asnumpy(b).flatten())):
                    if not np.allclose(x, y):
                        print('@{} {} v.s. {}'.format(i, x, y))
            if not F.allclose(r1, r2):
                _print_error(r1, r2)
            assert F.allclose(r1, r2)
            if not F.allclose(n_grad1, n_grad2):
                print('node grad')
                _print_error(n_grad1, n_grad2)
            assert(F.allclose(n_grad1, n_grad2))
>       _test('sum', False)

tests/compute/test_kernel.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_kernel.py:100: in _test
    builtin[red](msg='m', out='r1'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[[[-0.4279,  0.9162,  0.5406,  0.9737],
          [-0.5837, -0.7262,  0.8167, -0.8627],
          [-0.8493,  0...
          [-0.5046, -0.1275,  0.9672,  0.7794],
          [ 0.3485, -0.1620,  0.4156,  0.4446]]]], requires_grad=True)
rhs_data = None

    [gspmm body identical to the earlier failures]

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
____________________________ test_copy_edge_reduce _____________________________

    def test_copy_edge_reduce():
        def _test(red, partial):
            g = dgl.DGLGraph(nx.erdos_renyi_graph(100, 0.1))
            # NOTE(zihao): add self-loop to avoid zero-degree nodes.
            g.add_edges(g.nodes(), g.nodes())
            g = g.to(F.ctx())
            hu, hv, he = generate_feature(g, 'none', 'none')
            if partial:
                nid = F.tensor(list(range(0, 100, 2)), g.idtype)
            g.ndata['u'] = F.attach_grad(F.clone(hu))
            g.ndata['v'] = F.attach_grad(F.clone(hv))
            g.edata['e'] = F.attach_grad(F.clone(he))
            with F.record_grad():
                if partial:
                    g.pull(nid, fn.copy_edge(edge='e', out='m'),
                           builtin[red](msg='m', out='r1'))
                else:
                    g.update_all(fn.copy_edge(edge='e', out='m'),
                                 builtin[red](msg='m', out='r1'))
                r1 = g.ndata['r1']
                F.backward(F.reduce_sum(r1))
                e_grad1 = F.grad(g.edata['e'])
            # reset grad
            g.ndata['u'] = F.attach_grad(F.clone(hu))
            g.ndata['v'] = F.attach_grad(F.clone(hv))
            g.edata['e'] = F.attach_grad(F.clone(he))
            with F.record_grad():
                if partial:
                    g.pull(nid, udf_copy_edge, udf_reduce[red])
                else:
                    g.update_all(udf_copy_edge, udf_reduce[red])
                r2 = g.ndata['r2']
                F.backward(F.reduce_sum(r2))
                e_grad2 = F.grad(g.edata['e'])
            def _print_error(a, b):
                print("ERROR: Test copy_edge_{} partial: {}".
                      format(red, partial))
                return
                for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(),
                                               F.asnumpy(b).flatten())):
                    if not np.allclose(x, y):
                        print('@{} {} v.s. {}'.format(i, x, y))
            if not F.allclose(r1, r2):
                _print_error(r1, r2)
            assert F.allclose(r1, r2)
            if not F.allclose(e_grad1, e_grad2):
                print('edge gradient')
                _print_error(e_grad1, e_grad2)
            assert(F.allclose(e_grad1, e_grad2))
>       _test('sum', False)

tests/compute/test_kernel.py:197:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_kernel.py:162: in _test
    builtin[red](msg='m', out='r1'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:191: in func
    return gspmm(g, 'copy_rhs', reduce_op, None, x)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([[[[ 4.6492e-05,  7.2036e-02,  3.4002e-01,  6.9137e-01],
          [ 2.1845e-01,  5.2046e-01, -6.6986e-01,  3.1...1.2087e-01, -2.6184e-01],
          [-5.5105e-01,  6.1630e-01, -8.6822e-01,  5.0840e-01]]]], requires_grad=True)

    [gspmm body identical to the earlier failures]

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
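test_copy_src_reduce and test_copy_edge_reduce use the older copy_src/copy_edge spellings; the tracebacks show both lowering to the same gspmm kernel, with op='copy_lhs' for node data and op='copy_rhs' for edge data. For orientation, the builtins as the tests construct them:

    import dgl.function as fn

    msg1 = fn.copy_src(src='u', out='m')    # legacy alias of fn.copy_u('u', 'm'); lowers to gspmm op='copy_lhs'
    msg2 = fn.copy_edge(edge='e', out='m')  # legacy alias of fn.copy_e('e', 'm'); lowers to gspmm op='copy_rhs'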
___________________________ test_all_binary_builtins ___________________________

    def test_all_binary_builtins():
        def _test(g, lhs, rhs, binary_op, reducer, partial, nid,
                  broadcast='none'):
            # initialize node/edge features with uniform(-1, 1)
            hu, hv, he = generate_feature(g, broadcast, binary_op)
            if binary_op == 'div':
                # op = div
                # lhs range: [-1, 1]
                # rhs range: [1, 2]
                # result range: [-1, 1]
                if rhs == 'u':
                    hu = (hu + 3) / 2
                elif rhs == 'v':
                    hv = (hv + 3) / 2
                elif rhs == 'e':
                    he = (he + 3) / 2
            if binary_op == 'add' or binary_op == 'sub':
                # op = add, sub
                # lhs range: [-1/2, 1/2]
                # rhs range: [-1/2, 1/2]
                # result range: [-1, 1]
                hu = hu / 2
                hv = hv / 2
                he = he / 2
            g.ndata['u'] = F.attach_grad(F.clone(hu))
            g.ndata['v'] = F.attach_grad(F.clone(hv))
            g.edata['e'] = F.attach_grad(F.clone(he))

            builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs)
            builtin_msg = getattr(fn, builtin_msg_name)
            builtin_red = getattr(fn, reducer)

            def target_feature_switch(g, target):
                if target == "u":
                    return g.ndata["u"]
                elif target == "v":
                    return g.ndata["v"]
                else:
                    return g.edata["e"]

            with F.record_grad():
                if partial:
                    g.pull(nid, builtin_msg(lhs, rhs, 'm'),
                           builtin_red('m', 'r1'))
                else:
                    g.update_all(builtin_msg(lhs, rhs, 'm'),
                                 builtin_red('m', 'r1'))
                r1 = g.ndata.pop('r1')
                F.backward(F.reduce_sum(r1))
                lhs_grad_1 = F.grad(target_feature_switch(g, lhs))
                rhs_grad_1 = F.grad(target_feature_switch(g, rhs))

            # reset grad
            g.ndata['u'] = F.attach_grad(F.clone(hu))
            g.ndata['v'] = F.attach_grad(F.clone(hv))
            g.edata['e'] = F.attach_grad(F.clone(he))

            def target_switch(edges, target):
                if target == "u":
                    return edges.src
                elif target == "v":
                    return edges.dst
                elif target == "e":
                    return edges.data
                else:
                    assert(0), "Unknown target {}".format(target)

            def mfunc(edges):
                op = getattr(F, binary_op)
                lhs_data = target_switch(edges, lhs)[lhs]
                rhs_data = target_switch(edges, rhs)[rhs]
                # NOTE(zihao): we need to do batched broadcast
                # e.g. (68, 3, 1) op (68, 5, 3, 4)
                while F.ndim(lhs_data) < F.ndim(rhs_data):
                    lhs_data = F.unsqueeze(lhs_data, 1)
                while F.ndim(rhs_data) < F.ndim(lhs_data):
                    rhs_data = F.unsqueeze(rhs_data, 1)
                return {"m": op(lhs_data, rhs_data)}

            def rfunc(nodes):
                op = getattr(F, reducer)
                return {"r2": op(nodes.mailbox['m'], 1)}

            with F.record_grad():
                if partial:
                    g.pull(nid, mfunc, rfunc)
                else:
                    g.update_all(mfunc, rfunc)
                r2 = g.ndata.pop('r2')
                F.backward(F.reduce_sum(r2), F.tensor([1.]))
                lhs_grad_2 = F.grad(target_feature_switch(g, lhs))
                rhs_grad_2 = F.grad(target_feature_switch(g, rhs))

            rtol = 1e-4
            atol = 1e-4

            def _print_error(a, b):
                print("ERROR: Test {}_{}_{}_{} broadcast: {} partial: {}".
                      format(lhs, binary_op, rhs, reducer, broadcast, partial))
                return
                if lhs == 'u':
                    lhs_data = hu
                elif lhs == 'v':
                    lhs_data = hv
                elif lhs == 'e':
                    lhs_data = he
                if rhs == 'u':
                    rhs_data = hu
                elif rhs == 'v':
                    rhs_data = hv
                elif rhs == 'e':
                    rhs_data = he
                print("lhs", F.asnumpy(lhs_data).tolist())
                print("rhs", F.asnumpy(rhs_data).tolist())
                for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(),
                                               F.asnumpy(b).flatten())):
                    if not np.allclose(x, y, rtol, atol):
                        print('@{} {} v.s. {}'.format(i, x, y))

            if not F.allclose(r1, r2, rtol, atol):
                _print_error(r1, r2)
            assert F.allclose(r1, r2, rtol, atol)
            if not F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol):
                print("left grad")
                _print_error(lhs_grad_1, lhs_grad_2)
            assert(F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol))
            if not F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol):
                print("right grad")
                _print_error(rhs_grad_1, rhs_grad_2)
            assert(F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol))

        g = dgl.DGLGraph()
        g.add_nodes(20)
        # NOTE(zihao): add self-loop to avoid zero-degree nodes.
        g.add_edges(g.nodes(), g.nodes())
        for i in range(2, 18):
            g.add_edge(0, i)
            g.add_edge(1, i)
            g.add_edge(i, 18)
            g.add_edge(i, 19)
        g.add_edge(18, 0)
        g.add_edge(18, 1)
        g.add_edge(19, 0)
        g.add_edge(19, 1)
        g = g.to(F.ctx())
        nid = F.tensor([0, 1, 4, 5, 7, 12, 14, 15, 18, 19], g.idtype)
        target = ["u", "v", "e"]
        for lhs, rhs in product(target, target):
            if lhs == rhs:
                continue
            for binary_op in ["add", "sub", "mul", "div"]:
                for reducer in ["sum", "max", "min", "mean"]:
                    for broadcast in ["none", lhs, rhs]:
                        for partial in [False, True]:
                            print(lhs, rhs, binary_op, reducer, broadcast,
                                  partial)
                            _test(g, lhs, rhs, binary_op, reducer, partial, nid,
>                                 broadcast=broadcast)

tests/compute/test_kernel.py:363:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_kernel.py:250: in _test
    g.update_all(builtin_msg(lhs, rhs, 'm'), builtin_red('m', 'r1'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:362: in message_passing
    msgdata = invoke_gsddmm(g, mfunc)
python/dgl/core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python/dgl/ops/sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'add'
lhs_data = tensor([[[[-0.2139,  0.4581,  0.2703,  0.4869],
          [-0.2918, -0.3631,  0.4084, -0.4314],
          [-0.4247,  0...
          [ 0.2560,  0.2322,  0.2193, -0.4617],
          [ 0.3370, -0.1000,  0.4172,  0.3279]]]], requires_grad=True)
rhs_data = tensor([[[[-2.4563e-01, -4.7115e-02, -3.0174e-01, -3.5703e-01],
          [ 4.8955e-01, -4.1437e-01,  1.3228e-01, -1.4...5.2199e-02, -3.8938e-01],
          [ 1.5332e-01,  5.9728e-02, -6.3271e-02, -3.7373e-01]]]], requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    [gsddmm body identical to the earlier failures]

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
u v add sum none False
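The UDF message function in test_all_binary_builtins pads tensor ranks before broadcasting (the '(68, 3, 1) op (68, 5, 3, 4)' note in the source). The same alignment in plain torch:

    import torch

    lhs = torch.randn(68, 3, 1)
    rhs = torch.randn(68, 5, 3, 4)
    while lhs.dim() < rhs.dim():
        lhs = lhs.unsqueeze(1)       # (68, 3, 1) -> (68, 1, 3, 1)
    print((lhs + rhs).shape)         # torch.Size([68, 5, 3, 4])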
______________________ test_mean_zero_degree[g0-idtype0] _______________________

g = Graph(num_nodes=5, num_edges=6,
      ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int32

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo-zero-degree']))
    def test_mean_zero_degree(g, idtype):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.ones((g.number_of_nodes(), 3))
>       g.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'x'))

tests/compute/test_kernel.py:370:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]])
rhs_data = None

    [gspmm body identical to the earlier failures]

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
______________________ test_mean_zero_degree[g0-idtype1] _______________________

g = Graph(num_nodes=5, num_edges=6,
      ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
      edata_schemes={})
idtype = torch.int64

    [test source and intermediate frames identical to the g0-idtype0 failure above]

op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]])
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
__________________________ test_unary_copy_u[idtype0] __________________________

idtype = torch.int32

    @parametrize_idtype
    def test_unary_copy_u(idtype):
        def _test(mfunc, rfunc):
            g = create_test_heterograph_2(idtype)
            g0 = create_test_heterograph(idtype)
            g1 = create_test_heterograph_large(idtype)
            cross_reducer = rfunc.__name__
            x1 = F.randn((g.num_nodes('user'), feat_size))
            x2 = F.randn((g.num_nodes('developer'), feat_size))
            F.attach_grad(x1)
            F.attach_grad(x2)
            g.nodes['user'].data['h'] = x1
            g.nodes['developer'].data['h'] = x2

            #################################################################
            # multi_update_all(): call msg_passing separately for each etype
            #################################################################
            with F.record_grad():
                g.multi_update_all(
                    {etype : (mfunc('h', 'm'), rfunc('m', 'y'))
                     for etype in g.canonical_etypes},
                    cross_reducer)
                r1 = g.nodes['game'].data['y'].clone()
                r2 = g.nodes['user'].data['y'].clone()
                r3 = g.nodes['player'].data['y'].clone()
                loss = r1.sum() + r2.sum() + r3.sum()
                F.backward(loss)
                n_grad1 = F.grad(g.nodes['user'].data['h']).clone()
                n_grad2 = F.grad(g.nodes['developer'].data['h']).clone()
            g.nodes['user'].data.clear()
            g.nodes['developer'].data.clear()
            g.nodes['game'].data.clear()
            g.nodes['player'].data.clear()

            #################################################################
            # update_all(): call msg_passing for all etypes
            #################################################################
            F.attach_grad(x1)
            F.attach_grad(x2)
            g.nodes['user'].data['h'] = x1
            g.nodes['developer'].data['h'] = x2
            with F.record_grad():
                g.update_all(mfunc('h', 'm'), rfunc('m', 'y'))
                r4 = g.nodes['game'].data['y']
                r5 = g.nodes['user'].data['y']
                r6 = g.nodes['player'].data['y']
                loss = r4.sum() + r5.sum() + r6.sum()
                F.backward(loss)
                n_grad3 = F.grad(g.nodes['user'].data['h'])
                n_grad4 = F.grad(g.nodes['developer'].data['h'])
            assert F.allclose(r1, r4)
            assert F.allclose(r2, r5)
            assert F.allclose(r3, r6)
            assert(F.allclose(n_grad1, n_grad3))
            assert(F.allclose(n_grad2, n_grad4))

>       _test(fn.copy_u, fn.sum)

tests/compute/test_new_update_all_hetero.py:132:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_new_update_all_hetero.py:94: in _test
    cross_reducer)
python/dgl/heterograph.py:5023: in multi_update_all
    all_out[dtid].append(core.message_passing(g, mfunc, rfunc, afunc))
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.2540e+00, -9.2633e-01],
        [-1.7815e+00, -1.5279e+00],
        [-7.5881e-01,  9.6034e-01],
        [ ...-1.6601e+00, -1.6416e+00],
        [-1.4363e+00,  4.7783e-01],
        [ 1.0850e+00,  1.3749e-01]], requires_grad=True)
rhs_data = None

    [gspmm body identical to the earlier failures]

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
__________________________ test_unary_copy_u[idtype1] __________________________

idtype = torch.int64

    [test source and intermediate frames identical to the idtype0 failure above]

op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 8.7427e-01,  9.6819e-01],
        [ 1.1609e+00, -9.4198e-01],
        [-2.0202e-01,  1.3531e+00],
        [-... 8.8002e-01,  5.7814e-01],
        [-1.3564e+00, -3.6910e-01],
        [ 8.1194e-01, -1.6130e+00]], requires_grad=True)
rhs_data = None

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
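test_unary_copy_u asserts that one update_all over every edge type matches per-relation multi_update_all followed by a cross-type reduction. Schematically, on a toy graph (node/edge types invented for illustration; cross-type update_all is exactly what these tests exercise):

    import torch
    import dgl
    import dgl.function as fn

    g = dgl.heterograph({
        ('user', 'plays', 'game'): ([0, 1], [0, 1]),
        ('developer', 'develops', 'game'): ([0], [1]),
    })
    g.nodes['user'].data['h'] = torch.randn(2, 2)
    g.nodes['developer'].data['h'] = torch.randn(1, 2)
    # Single pass over all relations; 'game' aggregates from both:
    g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'y'))
    print(g.nodes['game'].data['y'])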
__________________________ test_unary_copy_e[idtype0] __________________________

idtype = torch.int32

    @parametrize_idtype
    def test_unary_copy_e(idtype):
        def _test(mfunc, rfunc):
            g = create_test_heterograph_large(idtype)
            g0 = create_test_heterograph_2(idtype)
            g1 = create_test_heterograph(idtype)
            cross_reducer = rfunc.__name__
            x1 = F.randn((g.num_edges('plays'),feat_size))
            x2 = F.randn((g.num_edges('follows'),feat_size))
            x3 = F.randn((g.num_edges('develops'),feat_size))
            x4 = F.randn((g.num_edges('wishes'),feat_size))
            F.attach_grad(x1)
            F.attach_grad(x2)
            F.attach_grad(x3)
            F.attach_grad(x4)
            g['plays'].edata['eid'] = x1
            g['follows'].edata['eid'] = x2
            g['develops'].edata['eid'] = x3
            g['wishes'].edata['eid'] = x4

            #################################################################
            # multi_update_all(): call msg_passing separately for each etype
            #################################################################
            with F.record_grad():
                g.multi_update_all(
                    {'plays' : (mfunc('eid', 'm'), rfunc('m', 'y')),
                     'follows': (mfunc('eid', 'm'), rfunc('m', 'y')),
                     'develops': (mfunc('eid', 'm'), rfunc('m', 'y')),
                     'wishes': (mfunc('eid', 'm'), rfunc('m', 'y'))},
                    cross_reducer)
                r1 = g.nodes['game'].data['y'].clone()
                r2 = g.nodes['user'].data['y'].clone()
                loss = r1.sum() + r2.sum()
                F.backward(loss)
                e_grad1 = F.grad(g['develops'].edata['eid']).clone()
                e_grad2 = F.grad(g['plays'].edata['eid']).clone()
                e_grad3 = F.grad(g['wishes'].edata['eid']).clone()
                e_grad4 = F.grad(g['follows'].edata['eid']).clone()
            {etype : (g[etype].edata.clear())
             for _, etype, _ in g.canonical_etypes},

            #################################################################
            # update_all(): call msg_passing for all etypes
            #################################################################
            # TODO(Israt): output type can be None in multi_update and empty
            F.attach_grad(x1)
            F.attach_grad(x2)
            F.attach_grad(x3)
            F.attach_grad(x4)
            g['plays'].edata['eid'] = x1
            g['follows'].edata['eid'] = x2
            g['develops'].edata['eid'] = x3
            g['wishes'].edata['eid'] = x4
            with F.record_grad():
                g.update_all(mfunc('eid', 'm'), rfunc('m', 'y'))
                r3 = g.nodes['game'].data['y']
                r4 = g.nodes['user'].data['y']
                loss = r3.sum() + r4.sum()
                F.backward(loss)
                e_grad5 = F.grad(g['develops'].edata['eid'])
                e_grad6 = F.grad(g['plays'].edata['eid'])
                e_grad7 = F.grad(g['wishes'].edata['eid'])
                e_grad8 = F.grad(g['follows'].edata['eid'])

            # # correctness check
            def _print_error(a, b):
                for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(),
                                               F.asnumpy(b).flatten())):
                    if not np.allclose(x, y):
                        print('@{} {} v.s. {}'.format(i, x, y))
            assert F.allclose(r1, r3)
            assert F.allclose(r2, r4)
            assert(F.allclose(e_grad1, e_grad5))
            assert(F.allclose(e_grad2, e_grad6))
            assert(F.allclose(e_grad3, e_grad7))
            assert(F.allclose(e_grad4, e_grad8))

>       _test(fn.copy_e, fn.sum)

tests/compute/test_new_update_all_hetero.py:217:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_new_update_all_hetero.py:168: in _test
    cross_reducer)
python/dgl/heterograph.py:5023: in multi_update_all
    all_out[dtid].append(core.message_passing(g, mfunc, rfunc, afunc))
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:191: in func
    return gspmm(g, 'copy_rhs', reduce_op, None, x)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([[ 0.6293,  1.5118],
        [ 0.4452,  0.8534],
        [-1.0854, -0.2637],
        ...,
        [ 1.5691, -1.2203],
        [-0.5798,  0.1200],
        [-0.7006, -1.1834]], requires_grad=True)

    [gspmm body identical to the earlier failures]

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
__________________________ test_unary_copy_e[idtype1] __________________________

idtype = torch.int64

    [test source and intermediate frames identical to the idtype0 failure above]

op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([[-0.0052,  2.0500],
        [-0.7121, -0.0811],
        [-0.1134, -0.1402],
        ...,
        [-0.6976, -0.4215],
        [ 0.9653, -0.8273],
        [-0.4365, -1.6862]], requires_grad=True)

>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
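test_unary_copy_e runs the same equivalence check with edge features in place of node features. A minimal single-relation version of the failing reduction (toy graph invented for illustration):

    import torch
    import dgl
    import dgl.function as fn

    g = dgl.heterograph({('user', 'plays', 'game'): ([0, 1, 1], [0, 0, 1])})
    g.edata['eid'] = torch.randn(3, 2)
    g.update_all(fn.copy_e('eid', 'm'), fn.sum('m', 'y'))
    print(g.nodes['game'].data['y'].shape)   # torch.Size([2, 2])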
{}'.format(i, x, y)) if not F.allclose(r1, r2): _print_error(r1, r2) assert F.allclose(r1, r2) # TODO (Israt): r1 and r2 have different frad func associated with # if not F.allclose(n_grad1, n_grad2): # print('node grad') # _print_error(n_grad1, n_grad2) # assert(F.allclose(n_grad1, n_grad2)) target = ["u", "v", "e"] for lhs, rhs in product(target, target): if lhs == rhs: continue for binary_op in ["add", "sub", "mul", "div"]: # TODO(Israt) :Add support for reduce func "max", "min", "mean" for reducer in ["sum"]: print(lhs, rhs, binary_op, reducer) > _test(lhs, rhs, binary_op, reducer) tests/compute/test_new_update_all_hetero.py:300: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/compute/test_new_update_all_hetero.py:264: in _test 'sum') python/dgl/heterograph.py:5023: in multi_update_all all_out[dtid].append(core.message_passing(g, mfunc, rfunc, afunc)) python/dgl/core.py:362: in message_passing msgdata = invoke_gsddmm(g, mfunc) python/dgl/core.py:266: in invoke_gsddmm z = op(graph, x, y) python/dgl/ops/sddmm.py:128: in func lhs_target=lhs_target, rhs_target=rhs_target) python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[ 1.7124, 0.1878], [ 1.1194, -1.8616]], requires_grad=True) rhs_data = tensor([[ 0.3744, 1.5753], [-0.3141, -0.7006]], requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- u v add sum ___________________________ test_binary_op[idtype1] ____________________________ idtype = torch.int64 @parametrize_idtype def test_binary_op(idtype): def _test(lhs, rhs, binary_op, reducer): g = create_test_heterograph(idtype) x1 = F.randn((g.num_nodes('user'), feat_size)) x2 = F.randn((g.num_nodes('developer'), feat_size)) x3 = F.randn((g.num_nodes('game'), feat_size)) F.attach_grad(x1) F.attach_grad(x2) F.attach_grad(x3) g.nodes['user'].data['h'] = x1 g.nodes['developer'].data['h'] = x2 g.nodes['game'].data['h'] = x3 x1 = F.randn((4,feat_size)) x2 = F.randn((4,feat_size)) x3 = F.randn((3,feat_size)) x4 = F.randn((3,feat_size)) F.attach_grad(x1) F.attach_grad(x2) F.attach_grad(x3) F.attach_grad(x4) g['plays'].edata['h'] = x1 g['follows'].edata['h'] = x2 g['develops'].edata['h'] = x3 g['wishes'].edata['h'] = x4 builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs) builtin_msg = getattr(fn, builtin_msg_name) builtin_red = getattr(fn, reducer) ################################################################# # multi_update_all(): call msg_passing separately for each etype ################################################################# with F.record_grad(): g.multi_update_all( {etype : (builtin_msg('h', 'h', 'm'), builtin_red('m', 'y')) for etype in g.canonical_etypes}, 'sum') r1 = g.nodes['game'].data['y'] F.backward(r1, F.ones(r1.shape)) n_grad1 = F.grad(r1) ################################################################# # update_all(): call msg_passing for all etypes 
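All of the failures in this report die on the same pair of lines in python/dgl/backend/pytorch/sparse.py rather than in test-specific logic: _cast_if_autocast_enabled(...) succeeds, and the very next "with autocast(enabled=False):" raises, because on this worker autocast is bound to DGL's empty_context fallback (used when the installed PyTorch has no usable AMP autocast), and that fallback accepts no keyword arguments. A minimal sketch that reproduces the exact message; the body of empty_context here is an assumption inferred from the error, not code copied from DGL:

    import contextlib

    @contextlib.contextmanager
    def empty_context(*args):
        # placeholder context manager: accepts positional arguments only
        yield

    autocast = empty_context        # effectively what the fallback binding does

    with autocast(enabled=False):   # TypeError: empty_context() got an
        pass                        # unexpected keyword argument 'enabled'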
___________________________ test_binary_op[idtype1] ____________________________

idtype = torch.int64

    @parametrize_idtype
    def test_binary_op(idtype):
        def _test(lhs, rhs, binary_op, reducer):
            g = create_test_heterograph(idtype)
            x1 = F.randn((g.num_nodes('user'), feat_size))
            x2 = F.randn((g.num_nodes('developer'), feat_size))
            x3 = F.randn((g.num_nodes('game'), feat_size))
            F.attach_grad(x1)
            F.attach_grad(x2)
            F.attach_grad(x3)
            g.nodes['user'].data['h'] = x1
            g.nodes['developer'].data['h'] = x2
            g.nodes['game'].data['h'] = x3
            x1 = F.randn((4,feat_size))
            x2 = F.randn((4,feat_size))
            x3 = F.randn((3,feat_size))
            x4 = F.randn((3,feat_size))
            F.attach_grad(x1)
            F.attach_grad(x2)
            F.attach_grad(x3)
            F.attach_grad(x4)
            g['plays'].edata['h'] = x1
            g['follows'].edata['h'] = x2
            g['develops'].edata['h'] = x3
            g['wishes'].edata['h'] = x4
            builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs)
            builtin_msg = getattr(fn, builtin_msg_name)
            builtin_red = getattr(fn, reducer)
            #################################################################
            # multi_update_all(): call msg_passing separately for each etype
            #################################################################
            with F.record_grad():
                g.multi_update_all(
                    {etype : (builtin_msg('h', 'h', 'm'), builtin_red('m', 'y'))
                     for etype in g.canonical_etypes},
                    'sum')
                r1 = g.nodes['game'].data['y']
                F.backward(r1, F.ones(r1.shape))
                n_grad1 = F.grad(r1)
            #################################################################
            # update_all(): call msg_passing for all etypes
            #################################################################
            g.update_all(builtin_msg('h', 'h', 'm'), builtin_red('m', 'y'))
            r2 = g.nodes['game'].data['y']
            F.backward(r2, F.ones(r2.shape))
            n_grad2 = F.grad(r2)
            # correctness check
            def _print_error(a, b):
                for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())):
                    if not np.allclose(x, y):
                        print('@{} {} v.s. {}'.format(i, x, y))
            if not F.allclose(r1, r2):
                _print_error(r1, r2)
            assert F.allclose(r1, r2)
            # TODO (Israt): r1 and r2 have different frad func associated with
            # if not F.allclose(n_grad1, n_grad2):
            #     print('node grad')
            #     _print_error(n_grad1, n_grad2)
            # assert(F.allclose(n_grad1, n_grad2))
        target = ["u", "v", "e"]
        for lhs, rhs in product(target, target):
            if lhs == rhs:
                continue
            for binary_op in ["add", "sub", "mul", "div"]:
                # TODO(Israt) :Add support for reduce func "max", "min", "mean"
                for reducer in ["sum"]:
                    print(lhs, rhs, binary_op, reducer)
>                   _test(lhs, rhs, binary_op, reducer)

tests/compute/test_new_update_all_hetero.py:300:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_new_update_all_hetero.py:264: in _test
    'sum')
python/dgl/heterograph.py:5023: in multi_update_all
    all_out[dtid].append(core.message_passing(g, mfunc, rfunc, afunc))
python/dgl/core.py:362: in message_passing
    msgdata = invoke_gsddmm(g, mfunc)
python/dgl/core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python/dgl/ops/sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[-0.4307, -0.1323],
        [-0.5036, 0.5018]], requires_grad=True)
rhs_data = tensor([[ 0.1916, -0.3977],
        [-0.0392, -0.7487]], requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
u v add sum
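Since every call site passes enabled=False, one conventional repair for older PyTorch builds is to give the fallback the same keyword surface as torch.cuda.amp.autocast, which has accepted enabled= since PyTorch 1.6. This is only a sketch of the shape of such a fix, not necessarily the patch under test in this PR:

    import contextlib

    try:
        from torch.cuda.amp import autocast  # the real AMP context manager
    except ImportError:
        @contextlib.contextmanager
        def autocast(enabled=False, **kwargs):
            # no-op fallback: accept and ignore autocast's keyword arguments
            yield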
___________________________ test_sum_case1[idtype0] ____________________________

idtype = torch.int32

    @parametrize_idtype
    def test_sum_case1(idtype):
        # NOTE: If you want to update this test case, remember to update the docstring
        # example too!!!
        g1 = dgl.graph(([0, 1], [1, 0]), idtype=idtype, device=F.ctx())
        g1.ndata['h'] = F.tensor([1., 2.])
        g2 = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
        g2.ndata['h'] = F.tensor([1., 2., 3.])
        bg = dgl.batch([g1, g2])
        bg.ndata['w'] = F.tensor([.1, .2, .1, .5, .2])
>       assert F.allclose(F.tensor([3.]), dgl.sum_nodes(g1, 'h'))

tests/compute/test_readout.py:20:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:180: in sum_nodes
    return readout_nodes(graph, feat, weight, ntype=ntype, op='sum')
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum', x = tensor([1., 2.]), offsets = tensor([0, 2])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
___________________________ test_sum_case1[idtype1] ____________________________

idtype = torch.int64

    @parametrize_idtype
    def test_sum_case1(idtype):
        # NOTE: If you want to update this test case, remember to update the docstring
        # example too!!!
        g1 = dgl.graph(([0, 1], [1, 0]), idtype=idtype, device=F.ctx())
        g1.ndata['h'] = F.tensor([1., 2.])
        g2 = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
        g2.ndata['h'] = F.tensor([1., 2., 3.])
        bg = dgl.batch([g1, g2])
        bg.ndata['w'] = F.tensor([.1, .2, .1, .5, .2])
>       assert F.allclose(F.tensor([3.]), dgl.sum_nodes(g1, 'h'))

tests/compute/test_readout.py:20:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:180: in sum_nodes
    return readout_nodes(graph, feat, weight, ntype=ntype, op='sum')
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum', x = tensor([1., 2.]), offsets = tensor([0, 2])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
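For reference, the value test_sum_case1 asserts is easy to verify by hand: segment_reduce reduces each slice x[offsets[i]:offsets[i+1]] independently, so with x = tensor([1., 2.]) and offsets = tensor([0, 2]) a 'sum' reduction yields tensor([3.]), exactly what the assertion compares against. A hypothetical reference implementation (the helper name segment_sum is illustrative, not DGL's API):

    import torch

    def segment_sum(x, offsets):
        # reduce each segment x[offsets[i]:offsets[i+1]] to a single row
        return torch.stack([x[s:e].sum(dim=0)
                            for s, e in zip(offsets[:-1], offsets[1:])])

    x = torch.tensor([1., 2.])
    offsets = torch.tensor([0, 2])
    print(segment_sum(x, offsets))  # tensor([3.])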
_____________________ test_reduce_readout[sum-g0-idtype0] ______________________

g = Graph(num_nodes=9, num_edges=15,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-2.8364, -1.0353, -0.4919],
        [-1.1800, 0.6876, 1.3861],
        [ 0.3296, 0.4225, 1.9144],
        ....7921],
        [-0.8496, 0.1152, 1.1855],
        [-1.7852, 0.0727, -0.8690],
        [-1.4106, -0.8359, 0.8846]])
offsets = tensor([0, 4, 7, 9])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g0-idtype1] ______________________

g = Graph(num_nodes=9, num_edges=15,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-1.0782, 0.6476, -0.8538],
        [-0.5916, -0.9811, 0.9358],
        [ 0.7833, -2.0918, -2.3020],
        ....5363],
        [-0.3733, -1.6018, 0.4269],
        [-0.1997, -1.3495, -1.6612],
        [-0.7860, 0.0297, 0.8318]])
offsets = tensor([0, 4, 7, 9])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g1-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-1.0739, 1.6836, -0.8769],
        [ 0.9023, 0.4539, -0.1565],
        [-0.3857, -2.4024, -0.7915],
        ....0675],
        [ 2.0740, -0.9526, 1.2514],
        [-1.9887, -0.1769, 0.3457],
        [ 0.5959, 1.6257, -0.4776]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g1-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[ 0.7592, -1.9412, -0.2083],
        [ 1.6470, 0.3246, 0.7294],
        [-0.4415, 1.6107, 0.7801],
        ....0334],
        [ 1.1004, -0.0910, 1.1176],
        [-0.2845, -0.2347, -0.4729],
        [-0.5175, 0.0828, 0.1533]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g2-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-1.4882, -0.2959, 0.1846],
        [-0.4123, 0.2662, 1.7657],
        [ 0.6045, -1.1730, 1.1152],
        ....0098],
        [-0.2651, 0.1838, 0.1709],
        [ 1.1290, -1.6773, 1.0562],
        [ 0.5711, -0.2179, -0.0673]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g2-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-1.0299, 0.2888, -1.1753],
        [-0.2036, 1.4174, 0.6362],
        [ 1.0356, -0.2404, 0.5936],
        ....9475],
        [ 1.3080, 0.1385, 0.0250],
        [-0.3133, 0.9439, 2.1647],
        [-0.3130, 0.2468, -1.6515]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g3-idtype0] ______________________

g = Graph(num_nodes=5, num_edges=6,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[ 0.5612, -0.8833, -0.0137],
        [ 0.3736, 1.3757, 0.5183],
        [ 0.1549, 0.3168, 0.8680],
        [-0.4952, 1.3115, -3.0724],
        [-0.1829, -0.1217, 0.0108]])
offsets = tensor([0, 5])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g3-idtype1] ______________________

g = Graph(num_nodes=5, num_edges=6,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-0.0659, 1.7096, 1.7523],
        [-0.6923, 0.2334, 1.0874],
        [ 0.2114, -1.9964, 1.1410],
        [ 1.1908, 0.4091, -0.5994],
        [ 1.4762, -0.1851, -1.1883]])
offsets = tensor([0, 5])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g4-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-0.9318, 1.3094, -0.7576],
        [-1.1828, -1.2134, -1.5229],
        [-0.4690, -1.6581, 0.7433],
        ....0472],
        [-1.2740, 0.4992, 0.0797],
        [-1.5507, -0.7189, -0.5618],
        [-0.7115, -0.3167, 1.2862]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g4-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-1.2567, 0.6579, 1.4155],
        [ 0.0826, 0.0606, -0.2362],
        [-0.9078, -1.1937, -0.4678],
        ....1282],
        [-1.6351, 0.3001, 1.5766],
        [-2.1818, -0.6137, 0.6100],
        [ 0.5433, 1.5816, -0.6341]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g5-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32),
                         'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-0.9684, -0.0986, -2.1648],
        [ 1.4855, 0.5254, 0.1432],
        [ 0.2317, 1.1700, -0.7859],
        ....5633],
        [ 0.1775, 0.5587, -0.2780],
        [-1.2737, 0.3181, -0.4133],
        [ 0.6354, 0.4366, 0.1938]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g5-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32),
                         'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[ 0.2859, -2.1922, -0.7229],
        [-1.8786, 1.4057, -0.7536],
        [-0.4350, -0.3864, -0.0662],
        ....0198],
        [ 0.4340, -0.5945, 0.4388],
        [-0.5108, -2.4906, -0.4912],
        [-0.2969, -1.1603, -0.8026]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g6-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32),
                         'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[ 1.4495, 1.3584, 0.2090],
        [-0.5432, 0.9094, 0.4453],
        [ 1.5122, -0.4356, 0.0107],
        ....3282],
        [-1.0932, 1.0396, 0.7743],
        [ 1.1792, -0.6445, -0.1553],
        [-0.6456, 0.0351, 0.3879]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[sum-g6-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32),
                         'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-0.9179, -0.2448, 0.7839],
        [-0.2048, -1.8162, -0.1200],
        [-1.8167, 0.9537, -0.3597],
        ....4443],
        [ 1.2055, -0.3588, 1.2357],
        [ 1.3663, -0.4251, 0.0981],
        [-0.1938, -1.0618, -0.2631]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g0-idtype0] ______________________

g = Graph(num_nodes=9, num_edges=15,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[ 1.7384, 0.7851, -1.3229],
        [-2.5093, -0.6567, -0.5050],
        [ 0.0304, -0.7496, 0.6551],
        ....6980],
        [ 0.8913, -0.7262, -0.7476],
        [ 1.5244, -2.2297, 0.0631],
        [ 1.1767, 0.2626, 0.1108]])
offsets = tensor([0, 4, 7, 9])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g0-idtype1] ______________________

g = Graph(num_nodes=9, num_edges=15,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[ 0.1971, 1.3030, 1.5339],
        [ 0.7505, 0.9339, 1.2065],
        [-0.5800, 0.1585, -0.1759],
        ....4128],
        [ 0.3614, -1.5498, 0.3387],
        [-1.4349, -0.4409, 1.1713],
        [-0.1676, -1.7781, 1.3261]])
offsets = tensor([0, 4, 7, 9])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g1-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[ 1.4524, 1.0101, -1.4148],
        [-0.0199, 0.8429, 0.1273],
        [-1.0890, 1.2716, -1.2364],
        ....4446],
        [-1.2056, -0.1659, -0.1270],
        [ 2.1496, -0.6172, -0.1679],
        [-1.2433, -1.9676, -0.6706]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g1-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[-0.1172, -1.5030, -0.5167],
        [-0.1370, -0.5816, 1.4138],
        [ 1.6629, 0.8165, 0.4193],
        ....0616],
        [-0.7221, 0.2972, -2.5609],
        [ 1.4580, -0.8247, 0.6954],
        [ 0.0260, 0.3529, 1.6332]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g2-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[-0.1360, 0.0952, 1.0855],
        [-0.9518, -0.8557, -0.7348],
        [ 0.9089, 0.6412, 0.8596],
        ....2154],
        [ 0.7787, -1.1165, -0.6668],
        [-0.5932, -1.4121, 0.5853],
        [-0.9795, 2.1147, 0.2790]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g2-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[ 0.5900, -0.2583, -0.4099],
        [ 1.1172, -2.4816, 1.7347],
        [-1.7869, -1.1873, -0.5998],
        ....1463],
        [ 0.2329, -1.2721, 0.6777],
        [ 0.3531, -0.7692, 0.2427],
        [-0.1919, -1.6321, -1.4145]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g3-idtype0] ______________________

g = Graph(num_nodes=5, num_edges=6,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[ 0.7597, -0.1085, 0.9324],
        [-1.1951, -0.7605, 0.3853],
        [-0.7983, -1.5080, -2.0970],
        [-0.0378, -0.4593, 1.2322],
        [-0.5752, -0.5472, -0.1621]])
offsets = tensor([0, 5])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g3-idtype1] ______________________

g = Graph(num_nodes=5, num_edges=6,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[ 0.3216, -0.0237, 1.2853],
        [ 0.4177, 0.8905, -0.2841],
        [ 0.8723, 0.1649, 0.0287],
        [-0.0258, -1.0771, 0.4485],
        [ 1.2325, -1.3257, 0.9312]])
offsets = tensor([0, 5])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g4-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[-1.1656, -1.5166, -1.1086],
        [ 1.2280, 0.2463, -1.3887],
        [ 1.4164, 0.7573, 0.8159],
        ....3492],
        [ 0.3382, 1.1115, -0.4812],
        [-0.0650, -0.4075, 1.6826],
        [ 0.2590, 1.5538, -0.3788]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g4-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[-1.0307, -2.4815, -1.1040],
        [-0.7126, 0.2800, 0.9599],
        [ 1.2104, -0.8818, -0.2031],
        ....8811],
        [ 0.5869, 0.0654, -1.0220],
        [-0.3045, 0.9668, 1.8083],
        [ 0.5610, 0.0356, -0.5141]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g5-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32),
                         'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[ 0.5707, 0.3315, 0.2343],
        [ 0.5113, -0.6062, -0.2481],
        [ 1.8136, -1.6279, 0.8133],
        ....9845],
        [ 1.0555, -0.6158, -0.6891],
        [ 0.8164, -1.1297, 0.7604],
        [-0.9752, 1.0832, 1.3712]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g5-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32),
                         'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[-0.7425, -0.2770, -0.1798],
        [ 0.3136, -1.6532, -0.2974],
        [ 1.3246, -1.0963, 0.7130],
        ....1270],
        [ 0.5998, 1.5538, 1.1494],
        [-1.0788, 1.7510, 0.7316],
        [ 0.3548, 2.8994, -1.6148]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g6-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32),
                         'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[ 0.0824, 0.4856, 1.3446],
        [-1.4574, 0.2896, -1.1125],
        [ 1.5695, -0.3545, 0.5547],
        ....5782],
        [ 0.0834, 0.4634, -0.3549],
        [-0.7650, 0.1975, 0.1324],
        [ 1.7680, -1.9302, 0.0125]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_____________________ test_reduce_readout[max-g6-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32),
                         'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[-1.4912, -1.1968, -0.8199],
        [-1.2686, 0.4707, 0.8057],
        [-1.2792, -0.0106, -0.6686],
        ....6055],
        [ 1.4023, 1.2641, -1.6673],
        [ 0.1817, -0.2396, -1.5206],
        [ 0.3942, 0.3143, -1.0493]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
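One structural difference shows up in the 'mean' parametrizations that follow: the traceback enters python/dgl/ops/segment.py at line 46 with a hard-coded 'sum' (note op = 'sum' in the frame locals) instead of line 52 with the caller's reducer, which suggests the mean readout is computed as a segment sum to be divided by each segment's length afterwards; it still dies on the same autocast line as everything else. A hypothetical sketch of that sum-then-divide shape (names are illustrative, not DGL's API):

    import torch

    def segment_mean(x, offsets):
        # segment sum first (cf. the 'sum' call at segment.py:46) ...
        sums = torch.stack([x[s:e].sum(dim=0)
                            for s, e in zip(offsets[:-1], offsets[1:])])
        # ... then divide by each segment's length
        sizes = (offsets[1:] - offsets[:-1]).unsqueeze(-1).to(x.dtype)
        return sums / sizes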
____________________ test_reduce_readout[mean-g0-idtype0] ______________________

g = Graph(num_nodes=9, num_edges=15,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:46: in segment_reduce
    rst = F.segment_reduce('sum', value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-0.8727, -0.0405, -0.3006],
        [-1.3837, -0.9498, -0.4831],
        [ 0.5086, -0.6589, 0.3464],
        ....1500],
        [ 0.8424, 0.5004, -0.5893],
        [ 0.5795, -0.6771, 0.7701],
        [ 1.4044, -1.3909, 0.4566]])
offsets = tensor([0, 4, 7, 9])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
____________________ test_reduce_readout[mean-g0-idtype1] ______________________

g = Graph(num_nodes=9, num_edges=15,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:46: in segment_reduce
    rst = F.segment_reduce('sum', value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-0.1080, 1.4928, 0.7905],
        [ 0.5214, 1.0339, 0.1882],
        [ 0.2188, -0.0341, 0.3122],
        ....5653],
        [ 0.9583, -1.7611, 0.6450],
        [ 0.7494, 1.1224, 0.1799],
        [ 0.7639, 0.3151, 0.3090]])
offsets = tensor([0, 4, 7, 9])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
____________________ test_reduce_readout[mean-g1-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:46: in segment_reduce
    rst = F.segment_reduce('sum', value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-1.6878, 0.5801, 0.6904],
        [-0.4028, 0.5830, -0.4169],
        [ 0.2618, -0.3212, 0.0323],
        ....0605],
        [ 0.9768, -0.1534, -0.2137],
        [-1.0656, -1.1702, 0.1521],
        [-0.3097, -1.1240, -2.4908]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
____________________ test_reduce_readout[mean-g1-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:46: in segment_reduce
    rst = F.segment_reduce('sum', value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-0.0266, 0.3205, 0.6258],
        [-0.4271, -1.4287, 1.6424],
        [ 0.5641, -1.0218, -0.5197],
        ....0323],
        [-1.7294, 0.1164, -1.0827],
        [-0.4149, 1.2868, 0.0692],
        [-0.8454, 1.0687, 1.2230]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
____________________ test_reduce_readout[mean-g2-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:46: in segment_reduce
    rst = F.segment_reduce('sum', value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-0.3977, -0.7196, 0.4419],
        [ 0.4705, 1.0076, 0.7802],
        [ 0.0820, 0.1748, -1.2564],
        ....5503],
        [ 1.4653, -0.4887, -1.5526],
        [-0.1370, -0.8826, 0.7302],
        [ 1.1000, -2.1532, -0.0598]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
____________________ test_reduce_readout[mean-g2-idtype1] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:46: in segment_reduce
    rst = F.segment_reduce('sum', value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-0.1221, -0.0688, 0.5091],
        [-0.3279, 1.0434, 0.3342],
        [ 1.7591, -0.9773, 0.2410],
        ....0199],
        [ 1.0838, 1.5097, -1.4376],
        [ 1.4138, 1.0552, 0.1734],
        [ 0.0873, -2.3705, 0.1102]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
____________________ test_reduce_readout[mean-g3-idtype0] ______________________

g = Graph(num_nodes=5, num_edges=6,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:46: in segment_reduce
    rst = F.segment_reduce('sum', value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[ 0.8321, 1.3178, 0.5952],
        [ 0.5472, -0.8827, -0.0050],
        [-0.9443, -3.5292, -0.5378],
        [-1.3447, 0.2092, -1.0280],
        [-1.4643, 1.1571, 0.0487]])
offsets = tensor([0, 5])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
____________________ test_reduce_readout[mean-g3-idtype1] ______________________

g = Graph(num_nodes=5, num_edges=6,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int64, reducer = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:46: in segment_reduce
    rst = F.segment_reduce('sum', value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-1.6457, -0.5760, -0.4526],
        [ 0.3032, 0.0465, -1.0869],
        [ 1.3555, -0.1523, -1.5895],
        [ 2.7079, -0.9616, 0.1727],
        [ 0.8902, -1.3105, 0.5742]])
offsets = tensor([0, 5])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
____________________ test_reduce_readout[mean-g4-idtype0] ______________________

g = Graph(num_nodes=10, num_edges=17,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', op=reducer)

tests/compute/test_readout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:46: in segment_reduce
    rst = F.segment_reduce('sum', value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-1.6001, -1.5004, 0.7876],
        [ 0.1058, 0.9451, -1.5262],
        [ 0.0306, 1.6976, 0.3043],
        ....2891],
        [ 1.0479, 0.1818, 0.8173],
        [-0.2006, -0.2820, -0.7820],
        [ 0.2447, -0.2099, -0.1746]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
____________________ test_reduce_readout[mean-g4-idtype1]
_____________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)} edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, reducer = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', op=reducer) tests/compute/test_readout.py:33: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:46: in segment_reduce rst = F.segment_reduce('sum', value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'sum' x = tensor([[ 0.8169, 0.1152, -1.2259], [ 0.5931, -0.8041, -0.5575], [-0.6959, 1.7790, -0.9705], ....1865], [-0.6379, -1.3150, 0.4085], [-0.9715, 0.4371, -0.5713], [ 0.5112, -0.2477, 1.4977]]) offsets = tensor([ 0, 10]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _____________________ test_reduce_readout[mean-g5-idtype0] _____________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), 'h': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, reducer = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', op=reducer) tests/compute/test_readout.py:33: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:46: in segment_reduce rst = F.segment_reduce('sum', value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'sum' x = tensor([[ 1.0167, -0.5613, -0.2234], [ 1.6827, 0.1945, 0.6440], [-0.2404, -2.0453, 0.7849], ....1146], [-0.3541, -0.9578, 0.2126], [-0.3597, 2.5869, 1.0755], [-1.1646, 1.9828, -0.1719]]) offsets = tensor([ 0, 10]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _____________________ test_reduce_readout[mean-g5-idtype1] _____________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float32), 'h': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, reducer = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', 
['sum', 'max', 'mean']) def test_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', op=reducer) tests/compute/test_readout.py:33: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:46: in segment_reduce rst = F.segment_reduce('sum', value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'sum' x = tensor([[ 0.0517, 0.5746, -1.0183], [ 0.2621, 1.1710, -0.7309], [ 0.2695, -0.8435, -0.3373], ....7484], [ 1.0682, 0.1011, 0.7573], [ 0.3541, -0.0729, -0.0553], [-0.3816, -1.1214, 1.6027]]) offsets = tensor([ 0, 10]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _____________________ test_reduce_readout[mean-g6-idtype0] _____________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'h': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int32, reducer = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', op=reducer) tests/compute/test_readout.py:33: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:46: in segment_reduce rst = F.segment_reduce('sum', value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'sum' x = tensor([[ 1.8807, 0.0703, -0.2096], [ 0.3712, -2.3564, -1.2583], [ 1.5721, -0.7247, -0.9661], ....1051], [-0.6164, -0.6354, -0.6497], [-0.1298, 0.0505, -0.0793], [ 0.4194, -0.2703, 0.5401]]) offsets = tensor([ 0, 10]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _____________________ test_reduce_readout[mean-g6-idtype1] _____________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)} edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32), 'h': Scheme(shape=(2,), dtype=torch.float32)}) idtype = torch.int64, reducer = 'mean' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', op=reducer) tests/compute/test_readout.py:33: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
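Every failure above bottoms out in the same two lines of
python/dgl/backend/pytorch/sparse.py: segment_reduce enters
`with autocast(enabled=False):`, but on this worker `autocast` is evidently
bound to a stand-in named empty_context that accepts no keyword arguments,
presumably because the installed PyTorch build lacks the amp autocast API. A
minimal sketch of one possible repair is a kwargs-tolerant fallback; the
try/except placement below is an assumption for illustration, not the actual
patch:

    from contextlib import contextmanager

    try:
        # Real context manager; accepts the `enabled` keyword.
        from torch.cuda.amp import autocast
    except ImportError:
        # Kwargs-tolerant no-op stand-in for builds without amp autocast.
        # (Sketch only: the fallback in the traceback, empty_context,
        # rejects `enabled`, which is exactly the TypeError seen above.)
        @contextmanager
        def autocast(enabled=False, **kwargs):
            yield

    # The failing call site then works on either kind of build:
    with autocast(enabled=False):
        pass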
_________________ test_weighted_reduce_readout[sum-g0-idtype0] _________________

g = Graph(num_nodes=9, num_edges=15,
          ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32), 'w': Scheme(shape=...t32)}
          edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'sum'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_weighted_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.ndata['w'] = F.randn((g.number_of_nodes(), 1))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        g.edata['w'] = F.randn((g.number_of_edges(), 1))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', 'w', op=reducer)

tests/compute/test_readout.py:81:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[ 0.3261,  1.5683,  0.3717],
        [-0.0552, -0.0162,  0.0074],
        [-1.0557,  0.1204, -0.1036],
        ....0230],
        [-0.0046,  0.3150, -0.2855],
        [ 1.0539,  0.7194, -0.0299],
        [-0.3368, -1.1975,  0.5502]])
offsets = tensor([0, 4, 7, 9])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError

The weighted variant reaches the same call site through
python/dgl/ops/segment.py:52 (the reducer is forwarded directly) rather than
line 46. The remaining test_weighted_reduce_readout[sum-*] parametrizations
fail identically:

FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
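For reference, the failing path is reproducible outside the test harness. The
sketch below assumes a CPU context and builds a batched graph whose per-graph
node counts (4, 3, 2) match the offsets tensor([0, 4, 7, 9]) recorded above;
the edge structure and feature values are otherwise arbitrary:

    import dgl
    import torch

    # Three small graphs; batching yields batch_num_nodes() == [4, 3, 2],
    # i.e. the cumulative offsets tensor([0, 4, 7, 9]) seen in the failures.
    g1 = dgl.graph(([0, 1, 2], [1, 2, 3]))  # 4 nodes
    g2 = dgl.graph(([0, 1], [1, 2]))        # 3 nodes
    g3 = dgl.graph(([0], [1]))              # 2 nodes
    bg = dgl.batch([g1, g2, g3])
    bg.ndata['h'] = torch.randn(bg.num_nodes(), 3)

    # Raises the TypeError above on the affected builds; on a healthy build
    # it returns one reduced row per graph, shape (3, 3).
    x = dgl.readout_nodes(bg, 'h', op='mean')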
0.9227], [ 0.4284, 5.6297, 0.1502], [ 0.2716, 0.7330, 0.3709]]) offsets = tensor([ 0, 10]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _________________ test_weighted_reduce_readout[max-g0-idtype0] _________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32), 'w': Scheme(shape=...t32)} edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int32, reducer = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_weighted_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.ndata['w'] = F.randn((g.number_of_nodes(), 1)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) g.edata['w'] = F.randn((g.number_of_edges(), 1)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', 'w', op=reducer) tests/compute/test_readout.py:81: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'max' x = tensor([[-0.0879, -0.0826, 0.0020], [-0.4365, 0.3302, -0.1149], [-0.2789, -0.4708, 0.7520], ....5525], [ 1.5727, -1.0589, -0.8505], [-0.3607, -0.0251, -0.2194], [ 0.7491, 0.9505, -1.2048]]) offsets = tensor([0, 4, 7, 9]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _________________ test_weighted_reduce_readout[max-g0-idtype1] _________________ g = Graph(num_nodes=9, num_edges=15, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32), 'w': Scheme(shape=...t32)} edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int64, reducer = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_weighted_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.ndata['w'] = F.randn((g.number_of_nodes(), 1)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) g.edata['w'] = F.randn((g.number_of_edges(), 1)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', 'w', op=reducer) tests/compute/test_readout.py:81: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'max' x = tensor([[ 0.4921, -0.2597, -0.1347], [-3.4025, -0.0409, 1.7869], [-3.8097, 1.1494, 2.0548], ....7685], [-0.7364, 0.2780, 0.0973], [-0.3737, 0.0868, -0.0499], [ 0.1862, -2.4873, -2.3914]]) 
offsets = tensor([0, 4, 7, 9]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _________________ test_weighted_reduce_readout[max-g1-idtype0] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32), 'w': Scheme(shape...t32)} edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int32, reducer = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_weighted_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.ndata['w'] = F.randn((g.number_of_nodes(), 1)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) g.edata['w'] = F.randn((g.number_of_edges(), 1)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', 'w', op=reducer) tests/compute/test_readout.py:81: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'max' x = tensor([[-1.9412, -0.6770, 0.7690], [ 0.3755, -1.4493, -0.5529], [ 1.0548, 0.2091, 0.0991], ....3125], [-0.3521, 0.1380, -0.0551], [-0.0052, 0.1446, -0.0127], [ 0.2298, 0.1160, -0.2141]]) offsets = tensor([ 0, 10]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _________________ test_weighted_reduce_readout[max-g1-idtype1] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32), 'w': Scheme(shape...t32)} edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int64, reducer = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_weighted_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.ndata['w'] = F.randn((g.number_of_nodes(), 1)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) g.edata['w'] = F.randn((g.number_of_edges(), 1)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', 'w', op=reducer) tests/compute/test_readout.py:81: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'max' x = tensor([[-4.0073e-01, -7.7875e-01, -3.5032e-01], [-3.3865e-01, -3.4744e-01, -2.0073e+00], [-3.3429e-01...7e-02, 1.9831e-01], [-6.3930e-02, 5.9300e-01, 1.9979e+00], [ 6.4975e-01, -6.8334e-02, -8.4643e-01]]) offsets = tensor([ 0, 10]) def segment_reduce(op, 
x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _________________ test_weighted_reduce_readout[max-g2-idtype0] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32), 'w': Scheme(shape...t32)} edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int32, reducer = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_weighted_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.ndata['w'] = F.randn((g.number_of_nodes(), 1)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) g.edata['w'] = F.randn((g.number_of_edges(), 1)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', 'w', op=reducer) tests/compute/test_readout.py:81: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'max' x = tensor([[ 1.7226, 0.4079, 0.8019], [ 0.1403, 0.0506, 0.4294], [-0.2672, -0.1885, -0.0904], ....7886], [-0.3799, 0.6025, 2.4005], [ 0.1458, 0.2526, 0.3558], [ 1.5102, 1.8874, 0.0850]]) offsets = tensor([ 0, 10]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _________________ test_weighted_reduce_readout[max-g2-idtype1] _________________ g = Graph(num_nodes=10, num_edges=17, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32), 'w': Scheme(shape...t32)} edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int64, reducer = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_weighted_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.ndata['w'] = F.randn((g.number_of_nodes(), 1)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) g.edata['w'] = F.randn((g.number_of_edges(), 1)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', 'w', op=reducer) tests/compute/test_readout.py:81: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'max' x = tensor([[ 0.6061, -0.7247, -0.8487], [-0.0567, 0.0166, 0.1725], [ 0.6027, 1.9727, -0.1648], ....7824], [ 0.0152, -0.0384, -0.0352], [-1.9871, 0.3338, 0.9092], [ 0.0436, -0.3636, 0.3027]]) offsets = tensor([ 0, 10]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with 
autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _________________ test_weighted_reduce_readout[max-g3-idtype0] _________________ g = Graph(num_nodes=5, num_edges=6, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32), 'w': Scheme(shape=(...t32)} edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int32, reducer = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_weighted_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.ndata['w'] = F.randn((g.number_of_nodes(), 1)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) g.edata['w'] = F.randn((g.number_of_edges(), 1)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', 'w', op=reducer) tests/compute/test_readout.py:81: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'max' x = tensor([[-1.7234, 0.5737, -0.2500], [ 0.0092, -0.0608, 0.0025], [ 0.5151, 0.3537, 0.1296], [-0.0327, 0.0353, 0.0146], [-0.2840, -0.0938, -0.1171]]) offsets = tensor([0, 5]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError _________________ test_weighted_reduce_readout[max-g3-idtype1] _________________ g = Graph(num_nodes=5, num_edges=6, ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32), 'w': Scheme(shape=(...t32)} edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)}) idtype = torch.int64, reducer = 'max' @parametrize_idtype @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph'])) @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean']) def test_weighted_reduce_readout(g, idtype, reducer): g = g.astype(idtype).to(F.ctx()) g.ndata['h'] = F.randn((g.number_of_nodes(), 3)) g.ndata['w'] = F.randn((g.number_of_nodes(), 1)) g.edata['h'] = F.randn((g.number_of_edges(), 2)) g.edata['w'] = F.randn((g.number_of_edges(), 1)) # Test.1: node readout > x = dgl.readout_nodes(g, 'h', 'w', op=reducer) tests/compute/test_readout.py:81: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/readout.py:88: in readout_nodes return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op) python/dgl/ops/segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'max' x = tensor([[-0.0271, 0.1810, 0.1791], [ 0.3765, -0.1090, -0.1773], [-0.6442, 0.8015, 0.8410], [ 0.1916, 0.2012, -0.0722], [ 1.4329, 2.6177, -0.5049]]) offsets = tensor([0, 5]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError 
_________________ test_weighted_reduce_readout[max-g4-idtype0] _________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32), 'w': Scheme(shape...t32)}
      edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'max'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_weighted_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.ndata['w'] = F.randn((g.number_of_nodes(), 1))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        g.edata['w'] = F.randn((g.number_of_edges(), 1))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', 'w', op=reducer)

tests/compute/test_readout.py:81:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[-0.0607, -0.0991, -0.1694],
        [-0.2003, -0.4722,  0.4881],
        [ 0.2429,  0.1810,  0.1022],
        ...,
        [-0.0423, -0.4084, -0.1811],
        [-2.5105,  1.8462,  0.2897],
        [-0.4721, -1.4015,  0.3187]])
offsets = tensor([ 0, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError

The remaining [max-*] parametrizations fail at the same call site with the same error:

FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
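A note on the call chain above: dgl.readout_nodes(g, 'h', 'w', op=...) scales each node feature by the weight 'w' and then reduces per graph in the batch via segment_reduce. In the [mean-*] cases that follow, segment.py:46 hands the kernel op='sum' even though reducer='mean', i.e. the mean is a sum kernel plus a normalization. A pure-PyTorch sketch of that flow, for orientation only — weighted_readout is a hypothetical name, and whether the normalizer is the segment length or the weight sum is not visible in this log (segment length is assumed):

import torch

def weighted_readout(h, w, offsets, op='sum'):
    # Hypothetical reference for dgl.readout_nodes(g, 'h', 'w', op=op).
    # h: (N, d) node features; w: (N, 1) weights;
    # offsets: segment boundaries, e.g. tensor([0, 4, 7, 9]).
    x = h * w                             # weighting precedes the reduction
    sizes = torch.diff(offsets).tolist()
    segs = torch.split(x, sizes)
    if op == 'max':
        return torch.stack([s.max(dim=0).values for s in segs])
    out = torch.stack([s.sum(dim=0) for s in segs])
    if op == 'mean':
        # Assumed normalizer: segment length (the log only shows the kernel
        # being invoked with 'sum' when reducer='mean').
        out = out / torch.tensor(sizes, dtype=out.dtype, device=out.device).unsqueeze(-1)
    return out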
________________ test_weighted_reduce_readout[mean-g0-idtype0] _________________

g = Graph(num_nodes=9, num_edges=15,
      ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32), 'w': Scheme(shape=...t32)}
      edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'w': Scheme(shape=(1,), dtype=torch.float32)})
idtype = torch.int32, reducer = 'mean'

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    @pytest.mark.parametrize('reducer', ['sum', 'max', 'mean'])
    def test_weighted_reduce_readout(g, idtype, reducer):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.ndata['w'] = F.randn((g.number_of_nodes(), 1))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        g.edata['w'] = F.randn((g.number_of_edges(), 1))
        # Test.1: node readout
>       x = dgl.readout_nodes(g, 'h', 'w', op=reducer)

tests/compute/test_readout.py:81:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:88: in readout_nodes
    return segment.segment_reduce(graph.batch_num_nodes(ntype), x, reducer=op)
python/dgl/ops/segment.py:46: in segment_reduce
    rst = F.segment_reduce('sum', value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum'
x = tensor([[-4.0132e-01, -8.1206e-03,  1.9132e-01],
        [ 6.0274e-01,  1.2888e+00,  9.5444e-02],
        ...,
        [-3.0383e-02,  1.5486e-01, -3.0273e-01],
        [-1.2977e+00,  2.8006e-01, -2.5003e+00]])
offsets = tensor([0, 4, 7, 9])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError

The remaining [mean-*] parametrizations fail at the same call site with the same error:

FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
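Every failure above, and the test_spmm failures below, shares one root cause, visible in the last two frames of each trace: on this PyTorch build, DGL binds the name autocast to empty_context, a context manager that takes no arguments, so the call autocast(enabled=False) in python/dgl/backend/pytorch/sparse.py raises the TypeError. That is consistent with the PR commit message, "fix for pytorch < 1.12". A minimal sketch of the pattern and one way to make the fallback tolerant — the guard shape here is an assumption; only the names autocast/empty_context and the enabled=False call site come from the traceback:

from contextlib import contextmanager

@contextmanager
def empty_context(*args, **kwargs):
    # Accept and ignore autocast-style arguments such as enabled=False,
    # so call sites can use one spelling on every PyTorch version.
    yield

try:
    # torch.cuda.amp.autocast has accepted `enabled` since PyTorch 1.6.
    from torch.cuda.amp import autocast
except ImportError:
    autocast = empty_context  # very old PyTorch: fall back to a no-op context

# Call-site pattern from the traceback:
# with autocast(enabled=False):
#     ...run the segment/SpMM kernel outside mixed precision...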
___________________________ test_softmax[g0-idtype0] ___________________________

g = Graph(num_nodes=9, num_edges=15,
      ndata_schemes={'h': Scheme(shape=(3,), dtype=torch.float32)}
      edata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)})
idtype = torch.int32

    @parametrize_idtype
    @pytest.mark.parametrize('g', get_cases(['homo'], exclude=['dglgraph']))
    def test_softmax(g, idtype):
        g = g.astype(idtype).to(F.ctx())
        g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
        g.edata['h'] = F.randn((g.number_of_edges(), 2))
        # Test.1: node readout
>       x = dgl.softmax_nodes(g, 'h')

tests/compute/test_readout.py:192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/readout.py:284: in softmax_nodes
    return segment.segment_softmax(graph.batch_num_nodes(ntype), x)
python/dgl/ops/segment.py:98: in segment_softmax
    value_max = segment_reduce(seglen, value, reducer='max')
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'max'
x = tensor([[ 0.1967, -0.6725,  2.4015],
        [-0.5067, -1.1432,  0.3985],
        [ 0.6828, -1.0130,  1.7629],
        ...,
        [ 0.7006,  0.0037,  0.1807],
        [ 0.1524,  0.8077,  1.3621],
        [-1.6351,  0.5322,  0.0109]])
offsets = tensor([0, 4, 7, 9])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError

The remaining test_softmax parametrizations fail at the same call site with the same error:

FAILED tests/compute/test_readout.py::test_softmax[g0-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g1-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g1-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g2-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g2-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g3-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g3-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g4-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g4-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g5-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g5-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g6-idtype0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_readout.py::test_softmax[g6-idtype1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
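The softmax traces also expose the algorithm: segment_softmax first takes a per-segment max (segment.py:98), the standard numerical-stabilization step, which is why these tests die inside the same segment_reduce wrapper before any softmax arithmetic runs. A readable pure-PyTorch equivalent, assuming the usual max-subtract/exp/normalize sequence (only the max step is visible in this log; this loop is illustrative, not DGL's kernel):

import torch

def segment_softmax(seglen, value):
    # seglen: 1-D tensor of segment lengths (e.g. tensor([4, 3, 2]) for
    # offsets [0, 4, 7, 9]); value: (N, d) features.
    out = []
    for seg in torch.split(value, seglen.tolist()):
        seg = seg - seg.max(dim=0, keepdim=True).values  # per-segment max, as in segment.py:98
        seg = seg.exp()
        out.append(seg / seg.sum(dim=0, keepdim=True))   # normalize within the segment
    return torch.cat(out)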
______________________ test_spmm[idtype0-sum-add-shp0-g0] ______________________

idtype = torch.int32
g = Graph(num_nodes=30, num_edges=100,
      ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)}
      edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'add', reducer = 'sum'

    @pytest.mark.parametrize('g', graphs)
    @pytest.mark.parametrize('shp', spmm_shapes)
    @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs'])
    @pytest.mark.parametrize('reducer', ['sum', 'min', 'max'])
    @parametrize_idtype
    def test_spmm(idtype, g, shp, msg, reducer):
        g = g.astype(idtype).to(F.ctx())
        print(g)
        print(g.idtype)
        hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1)
        he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1)
        print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he)))
        g.srcdata['x'] = F.attach_grad(F.clone(hu))
        g.edata['w'] = F.attach_grad(F.clone(he))
        print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer))
        u = F.attach_grad(F.clone(hu))
        e = F.attach_grad(F.clone(he))
        with F.record_grad():
>           v = gspmm(g, msg, reducer, u, e)

tests/compute/test_sparse.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add', reduce_op = 'sum'
lhs_data = tensor([[[[[[1.7988], [1.9738], [1.8492]]], [[[1.7822], [1.9642], ...
        [[[1.1523], [1.8091], [1.4629]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.8393]], [[1.1441]], [[1.7414]]]], [[[[1.7275]], ...
        [[[[1.9997]], [[1.3344]], [[1.4938]]]]]], dtype=torch.float64, requires_grad=True)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100,
      ndata_schemes={}
      edata_schemes={})
torch.int32
u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1])
SpMM(message func: add, reduce func: sum)

The heterograph variant fails at the same call site with the same error:

FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
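The gspmm frame above also shows the backend's operator rewriting before it reaches the broken autocast gate: 'sub' and 'div' are folded into 'add' and 'mul' by transforming the edge-side operand. In isolation, as a sketch (normalize_op is a hypothetical name; the inference that only add/mul kernels are implemented is drawn from this rewrite, not stated anywhere in the log):

def normalize_op(op, rhs_data):
    # Reduce 'sub'/'div' to the 'add'/'mul' kernels by rewriting the
    # right-hand operand, exactly as the gspmm listing above does.
    if op == 'sub':
        op, rhs_data = 'add', -rhs_data
    if op == 'div':
        op, rhs_data = 'mul', 1. / rhs_data
    return op, rhs_data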
The shp1 and shp2 parametrizations fail at the same call site with the same error:

FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g1] - TypeError: empty_context() got an unexpected keyword argument 'enabled'
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g0] - TypeError: empty_context() got an unexpected keyword argument 'enabled'

______________________ test_spmm[idtype0-sum-add-shp2-g1] ______________________

idtype = torch.int32
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300},
      metagraph=[('_U', '_V', '_E')])
shp = ((1,), (3,)), msg = 'add', reducer = 'sum'

    @pytest.mark.parametrize('g', graphs)
    @pytest.mark.parametrize('shp', spmm_shapes)
    @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs'])
    @pytest.mark.parametrize('reducer', ['sum', 'min', 'max'])
    @parametrize_idtype
    def test_spmm(idtype, g, shp, msg, reducer):
        g = g.astype(idtype).to(F.ctx())
        print(g)
        print(g.idtype)
        hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1)
        he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1)
        print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he)))
        g.srcdata['x'] = F.attach_grad(F.clone(hu))
        g.edata['w'] = F.attach_grad(F.clone(he))
        print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer))
        u = F.attach_grad(F.clone(hu))
        e = F.attach_grad(F.clone(he))
        with F.record_grad():
>           v = gspmm(g, msg, reducer, u, e)

tests/compute/test_sparse.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add', reduce_op = 'sum'
lhs_data = tensor([[1.0196], [1.5406], [1.3206], [1.6903], [1.2002], [1.1520], ...
        [1.9055], [1.0519], [1.1033], [1.8468]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.2513, 1.3992, 1.1699], [1.1009, 1.7682, 1.4494], [1.7635, 1.5298, 1.4288], ...
        [1.0477, 1.9134, 1.2878], [1.4969, 1.6770, 1.7768]], dtype=torch.float64, requires_grad=True)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: add, reduce func: sum) ______________________ test_spmm[idtype0-sum-add-shp3-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'add', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.5347, 1.8398, 1.8706], [1.1133, 1.4291, 1.9496], [1.7728, 1.0964, 1.1372], [1.1396,... 1.9298], [1.7124, 1.3413, 1.8794], [1.7815, 1.6723, 1.7826]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6344], [1.2153], [1.5949], [1.7928], [1.7063], [1.5364], [1...660], [1.6560], [1.2985], [1.5984], [1.2720]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: add, reduce func: sum) ______________________ test_spmm[idtype0-sum-add-shp3-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'add', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.6106, 1.5596, 1.6960], [1.3212, 1.1348, 1.2166], [1.7009, 1.1504, 1.2053], [1.6894,... 1.6111], [1.2993, 1.8395, 1.9852], [1.4233, 1.7940, 1.7565]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9598], [1.1862], [1.1602], [1.7684], [1.8662], [1.8374], [1...194], [1.0936], [1.9684], [1.4152], [1.7579]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: add, reduce func: sum) ______________________ test_spmm[idtype0-sum-add-shp4-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'add', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.6792], [1.7253], [1.9835], [1.5296], [1.9047], [1.0949], [1...156], [1.5139], [1.2748], [1.0495], [1.2839]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8760], [1.2263], [1.1637], [1.3442], [1.8018], [1.1687], [1...451], [1.9836], [1.2814], [1.8288], [1.1753]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: add, reduce func: sum) ______________________ test_spmm[idtype0-sum-add-shp4-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'add', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.6298], [1.3763], [1.5548], [1.0676], [1.0084], [1.4410], [1...982], [1.9721], [1.0902], [1.9185], [1.1756]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6589], [1.6065], [1.0723], [1.4644], [1.5275], [1.4710], [1...053], [1.2265], [1.3697], [1.8278], [1.1642]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: add, reduce func: sum) ______________________ test_spmm[idtype0-sum-add-shp5-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'add', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([1.3055, 1.4002, 1.0308, 1.8993, 1.3034, 1.1198, 1.2363, 1.1388, 1.4134, 1.8861, 1.8042, 1.0242, 1.4917...2283, 1.5901, 1.5505, 1.5083, 1.6590, 1.1661, 1.1199, 1.7411, 1.4893], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.0858, 1.3805, 1.4047, 1.2790, 1.7066, 1.8015, 1.4226, 1.9157, 1.0914, 1.5754, 1.7787, 1.1304, 1.1554...7728, 1.1475, 1.7822, 1.0086, 1.5114, 1.0413, 1.7404, 1.2910, 1.5359], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: add, reduce func: sum) ______________________ test_spmm[idtype0-sum-add-shp5-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'add', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([1.7120, 1.6719, 1.2374, 1.4007, 1.3862, 1.4946, 1.4730, 1.4063, 1.0453, 1.6854, 1.8517, 1.0627, 1.8465...7072, 1.8136, 1.2734, 1.9800, 1.5682, 1.4515, 1.5760, 1.2391, 1.9564], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.7235, 1.0480, 1.3563, 1.5806, 1.6711, 1.2613, 1.7576, 1.4966, 1.1601, 1.6759, 1.5343, 1.8077, 1.0834...3140, 1.5604, 1.4091, 1.7468, 1.8287, 1.2366, 1.4802, 1.9232, 1.4034], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: add, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp0-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[[[[[1.4805], [1.4058], [1.6997]]], [[[1.8374], [1.6162], ...8]]], [[[1.2626], [1.4467], [1.5986]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.7703]], [[-1.4230]], [[-1.4124]]]], [[[[-1.1790]], [[-... [[[[-1.5550]], [[-1.7774]], [[-1.3281]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp0-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[[[[[1.2516], [1.3863], [1.2933]]], [[[1.1787], [1.9850], ...7]]], [[[1.0899], [1.6965], [1.1708]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.1369]], [[-1.7458]], [[-1.3634]]]], [[[[-1.3718]], [[-... [[[[-1.1812]], [[-1.4360]], [[-1.7928]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp1-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[[1.1791, 1.3372, 1.8695], [1.5076, 1.2234, 1.2826], [1.4083, 1.5960, 1.7498]], [[1...2752], [1.2401, 1.8662, 1.9926], [1.9347, 1.0054, 1.3359]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[-1.9264, -1.8565, -1.3119]], [[-1.1963, -1.8883, -1.7505]], [[-1.8197, -1.4651, -1.8237]],...1.4999, -1.9605, -1.4011]], [[-1.6672, -1.4501, -1.4816]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp1-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[[1.5206, 1.9339, 1.0833], [1.2695, 1.4418, 1.4243], [1.1122, 1.4374, 1.2612]], [[1...5994], [1.6962, 1.0114, 1.4815], [1.8979, 1.9793, 1.3215]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[-1.1712, -1.4582, -1.4906]], [[-1.1365, -1.9007, -1.7136]], [[-1.6691, -1.4544, -1.2016]],...1.2868, -1.5125, -1.1145]], [[-1.3599, -1.6483, -1.7072]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp2-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.1123], [1.9208], [1.5630], [1.2850], [1.6032], [1.3009], [1...404], [1.4406], [1.9476], [1.4543], [1.3749]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.8453, -1.1886, -1.5568], [-1.3840, -1.6391, -1.1400], [-1.4007, -1.5679, -1.0582], ... [-1.8283, -1.9624, -1.2588], [-1.1225, -1.4817, -1.4477]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp2-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.0784], [1.4055], [1.9026], [1.6320], [1.3372], [1.3157], [1...719], [1.9869], [1.1495], [1.5442], [1.5053]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.3930, -1.4457, -1.7824], [-1.0067, -1.1247, -1.8807], [-1.0412, -1.4318, -1.1551], ... [-1.3699, -1.1305, -1.9950], [-1.9934, -1.5551, -1.2210]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp3-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.1425, 1.0545, 1.6091], [1.4111, 1.2635, 1.5326], [1.2408, 1.6082, 1.0603], [1.4984,... 1.5712], [1.0820, 1.2713, 1.5015], [1.9589, 1.0775, 1.9009]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.2473], [-1.9782], [-1.4914], [-1.8235], [-1.3040], [-1.4999], ... [-1.8185], [-1.0769], [-1.0625], [-1.8108]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp3-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.6933, 1.9630, 1.2219], [1.8793, 1.5275, 1.2808], [1.1967, 1.5776, 1.8460], [1.3277,... 1.8764], [1.5420, 1.8778, 1.6654], [1.2725, 1.7009, 1.3823]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.2569], [-1.8180], [-1.3741], [-1.3436], [-1.2602], [-1.2169], ... [-1.9400], [-1.9570], [-1.4328], [-1.9951]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp4-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.2760], [1.0470], [1.1414], [1.7306], [1.7177], [1.4677], [1...510], [1.2984], [1.1867], [1.1724], [1.9103]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.9894], [-1.1105], [-1.3630], [-1.7839], [-1.6529], [-1.8446], ... [-1.5321], [-1.2838], [-1.5264], [-1.7479]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp4-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.6244], [1.5533], [1.8909], [1.6866], [1.3093], [1.5936], [1...122], [1.8332], [1.9697], [1.3892], [1.3096]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.9767], [-1.8773], [-1.5309], [-1.1591], [-1.4181], [-1.7704], ... [-1.1628], [-1.9562], [-1.4900], [-1.2409]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp5-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([1.3037, 1.0742, 1.1153, 1.5434, 1.8342, 1.8203, 1.8247, 1.9058, 1.9678, 1.0746, 1.7946, 1.6702, 1.1102...0085, 1.8335, 1.4440, 1.4577, 1.0415, 1.4778, 1.9637, 1.5857, 1.1518], dtype=torch.float64, requires_grad=True) rhs_data = tensor([-1.0224, -1.3550, -1.6578, -1.5635, -1.8304, -1.5426, -1.3127, -1.7301, -1.5008, -1.0747, -1.3295, -1....9896, -1.8214, -1.3959, -1.0630, -1.9159, -1.9049, -1.1866], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-sub-shp5-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([1.7508, 1.9074, 1.1235, 1.1536, 1.4819, 1.6190, 1.8066, 1.1496, 1.8230, 1.8883, 1.1991, 1.3882, 1.5839...8291, 1.3387, 1.2565, 1.0445, 1.6726, 1.8034, 1.9525, 1.0970, 1.3303], dtype=torch.float64, requires_grad=True) rhs_data = tensor([-1.2986, -1.2146, -1.6727, -1.6179, -1.1804, -1.6827, -1.2872, -1.7017, -1.4716, -1.5743, -1.9886, -1....8991, -1.4532, -1.6248, -1.0588, -1.4869, -1.8691, -1.5706], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype0-sum-mul-shp0-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[[[[[1.0140], [1.8979], [1.2494]]], [[[1.0599], [1.1319], ...0]]], [[[1.6851], [1.6047], [1.5512]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2067]], [[1.7983]], [[1.0364]]]], [[[[1.7821]], [[1.704... [[[[1.8652]], [[1.2216]], [[1.7332]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype0-sum-mul-shp0-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[[[[[1.5903], [1.9912], [1.7561]]], [[[1.5368], [1.1804], ...0]]], [[[1.6191], [1.6275], [1.8715]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.3511]], [[1.9281]], [[1.9746]]]], [[[[1.6182]], [[1.147... [[[[1.8363]], [[1.1885]], [[1.0999]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The next ten 'mul' parametrizations fail identically: same traceback through
tests/compute/test_sparse.py:120 and python/dgl/ops/spmm.py:77, same error at
python/dgl/backend/pytorch/sparse.py:720
(TypeError: empty_context() got an unexpected keyword argument 'enabled').
Only the graph and the operand shapes from the captured stdout differ:

test_spmm[idtype0-sum-mul-shp1-g0]  u: torch.Size([30, 3, 3])  e: torch.Size([100, 1, 3])
test_spmm[idtype0-sum-mul-shp1-g1]  u: torch.Size([30, 3, 3])  e: torch.Size([300, 1, 3])
test_spmm[idtype0-sum-mul-shp2-g0]  u: torch.Size([30, 1])     e: torch.Size([100, 3])
test_spmm[idtype0-sum-mul-shp2-g1]  u: torch.Size([30, 1])     e: torch.Size([300, 3])
test_spmm[idtype0-sum-mul-shp3-g0]  u: torch.Size([30, 3])     e: torch.Size([100, 1])
test_spmm[idtype0-sum-mul-shp3-g1]  u: torch.Size([30, 3])     e: torch.Size([300, 1])
test_spmm[idtype0-sum-mul-shp4-g0]  u: torch.Size([30, 1])     e: torch.Size([100, 1])
test_spmm[idtype0-sum-mul-shp4-g1]  u: torch.Size([30, 1])     e: torch.Size([300, 1])
test_spmm[idtype0-sum-mul-shp5-g0]  u: torch.Size([30])        e: torch.Size([100])
test_spmm[idtype0-sum-mul-shp5-g1]  u: torch.Size([30])        e: torch.Size([300])
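A note on the 'div' cases summarized next: the gspmm source shown in every
frame rewrites subtraction into addition of a negated rhs, and division into
multiplication by a reciprocal, so the backend only needs 'add' and 'mul'
kernels. A quick sanity check of that identity, assuming nothing beyond plain
torch (names are illustrative only):

    import torch

    u = torch.rand(4, 3) + 1   # operands bounded away from zero, as in the test
    e = torch.rand(4, 3) + 1
    # gspmm turns op == 'div' into op == 'mul' with rhs_data = 1. / rhs_data:
    assert torch.allclose(u / e, u * (1. / e))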
The twelve 'div' parametrizations fail identically as well. Per the rewrite
illustrated above, their captured frames already show op = 'mul' and an
rhs_data carrying a grad_fn (the reciprocal has been taken) when the autocast
wrapper raises the same TypeError at python/dgl/backend/pytorch/sparse.py:720:

test_spmm[idtype0-sum-div-shp0-g0]  u: torch.Size([30, 1, 2, 1, 3, 1])  e: torch.Size([100, 4, 1, 3, 1, 1])
test_spmm[idtype0-sum-div-shp0-g1]  u: torch.Size([30, 1, 2, 1, 3, 1])  e: torch.Size([300, 4, 1, 3, 1, 1])
test_spmm[idtype0-sum-div-shp1-g0]  u: torch.Size([30, 3, 3])  e: torch.Size([100, 1, 3])
test_spmm[idtype0-sum-div-shp1-g1]  u: torch.Size([30, 3, 3])  e: torch.Size([300, 1, 3])
test_spmm[idtype0-sum-div-shp2-g0]  u: torch.Size([30, 1])     e: torch.Size([100, 3])
test_spmm[idtype0-sum-div-shp2-g1]  u: torch.Size([30, 1])     e: torch.Size([300, 3])
test_spmm[idtype0-sum-div-shp3-g0]  u: torch.Size([30, 3])     e: torch.Size([100, 1])
test_spmm[idtype0-sum-div-shp3-g1]  u: torch.Size([30, 3])     e: torch.Size([300, 1])
test_spmm[idtype0-sum-div-shp4-g0]  u: torch.Size([30, 1])     e: torch.Size([100, 1])
test_spmm[idtype0-sum-div-shp4-g1]  u: torch.Size([30, 1])     e: torch.Size([300, 1])
test_spmm[idtype0-sum-div-shp5-g0]  u: torch.Size([30])        e: torch.Size([100])
test_spmm[idtype0-sum-div-shp5-g1]  u: torch.Size([30])        e: torch.Size([300])
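For reference, the operation these tests never got to verify: SpMM with a 'mul'
message and 'sum' reducer aggregates, for each destination node, the
elementwise product of source-node and edge features (after the rewrite this
also covers the 'div' cases). A dense sketch in plain torch over a hypothetical
toy graph; DGL's kernel additionally broadcasts the mismatched trailing shapes
that the shp pairs above exercise:

    import torch

    # Toy graph with three edges: 0 -> 1, 0 -> 2, 1 -> 2.
    src = torch.tensor([0, 0, 1])
    dst = torch.tensor([1, 2, 2])
    u = torch.rand(3, 2) + 1           # node features, like hu in the test
    w = torch.rand(3, 2) + 1           # one feature row per edge, like he
    v = torch.zeros(3, 2)
    v.index_add_(0, dst, u[src] * w)   # message: mul, reduce: sum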
The 'copy_lhs' parametrizations in this part of the log hit the identical
error. Here op passes through gspmm's rewriting untouched (it is neither 'sub'
nor 'div'), so the crash is purely the autocast wrapper at
python/dgl/backend/pytorch/sparse.py:720:

test_spmm[idtype0-sum-copy_lhs-shp0-g0]  u: torch.Size([30, 1, 2, 1, 3, 1])  e: torch.Size([100, 4, 1, 3, 1, 1])
test_spmm[idtype0-sum-copy_lhs-shp0-g1]  u: torch.Size([30, 1, 2, 1, 3, 1])  e: torch.Size([300, 4, 1, 3, 1, 1])
test_spmm[idtype0-sum-copy_lhs-shp1-g0]  u: torch.Size([30, 3, 3])  e: torch.Size([100, 1, 3])

The last copy_lhs failure of this section is kept in full below the sketch that
follows.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: copy_lhs, reduce func: sum) ___________________ test_spmm[idtype0-sum-copy_lhs-shp1-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'copy_lhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[[1.4779, 1.3780, 1.8392], [1.0997, 1.1402, 1.9693], [1.7200, 1.0185, 1.6612]], [[1...4226], [1.5895, 1.4566, 1.8223], [1.1345, 1.6480, 1.5332]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.8881, 1.3324, 1.2988]], [[1.3590, 1.5581, 1.2008]], [[1.6410, 1.7804, 1.3370]], ...]], [[1.6402, 1.2671, 1.9508]], [[1.6335, 1.2026, 1.1520]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: copy_lhs, reduce func: sum) ___________________ test_spmm[idtype0-sum-copy_lhs-shp2-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'copy_lhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[1.4593], [1.8479], [1.6698], [1.4151], [1.3091], [1.8983], [1...055], [1.7282], [1.4490], [1.6849], [1.7787]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.2791, 1.5762, 1.6729], [1.3757, 1.9369, 1.3147], [1.2084, 1.2129, 1.9397], [1.0443,... 1.3443], [1.9673, 1.0693, 1.0730], [1.1513, 1.1696, 1.2334]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: copy_lhs, reduce func: sum) ___________________ test_spmm[idtype0-sum-copy_lhs-shp2-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'copy_lhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[1.8449], [1.7321], [1.9656], [1.6878], [1.7632], [1.9886], [1...581], [1.0305], [1.2214], [1.3983], [1.9188]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7275, 1.5659, 1.2324], [1.4721, 1.1070, 1.8263], [1.2426, 1.2408, 1.6990], [1.4391,... 1.8544], [1.3335, 1.3326, 1.6916], [1.0626, 1.7277, 1.5237]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: copy_lhs, reduce func: sum) ___________________ test_spmm[idtype0-sum-copy_lhs-shp3-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'copy_lhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[1.4010, 1.1068, 1.1218], [1.2847, 1.0731, 1.4992], [1.3375, 1.2383, 1.8182], [1.3825,... 1.0318], [1.5544, 1.2142, 1.7238], [1.4998, 1.0758, 1.0448]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5598], [1.0299], [1.8050], [1.8472], [1.0688], [1.1781], [1...552], [1.4060], [1.3798], [1.2749], [1.8655]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: copy_lhs, reduce func: sum) ___________________ test_spmm[idtype0-sum-copy_lhs-shp3-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'copy_lhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[1.3793, 1.7244, 1.7848], [1.7753, 1.9518, 1.5453], [1.2922, 1.5085, 1.4221], [1.8086,... 1.2929], [1.4976, 1.3039, 1.9827], [1.5732, 1.5599, 1.7200]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6071], [1.9353], [1.0459], [1.9373], [1.0379], [1.0515], [1...198], [1.9897], [1.4742], [1.6116], [1.0560]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: copy_lhs, reduce func: sum) ___________________ test_spmm[idtype0-sum-copy_lhs-shp4-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'copy_lhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[1.3715], [1.1650], [1.3288], [1.6154], [1.6781], [1.4083], [1...551], [1.3351], [1.3725], [1.8450], [1.0347]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1439], [1.3960], [1.5681], [1.0697], [1.5837], [1.5640], [1...335], [1.2699], [1.1305], [1.6417], [1.3109]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: copy_lhs, reduce func: sum) ___________________ test_spmm[idtype0-sum-copy_lhs-shp4-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'copy_lhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([[1.0093], [1.7665], [1.9353], [1.7445], [1.9016], [1.3253], [1...363], [1.3317], [1.8038], [1.3437], [1.0866]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9193], [1.2632], [1.0993], [1.5874], [1.7641], [1.2434], [1...262], [1.2883], [1.1883], [1.2261], [1.7014]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: copy_lhs, reduce func: sum) ___________________ test_spmm[idtype0-sum-copy_lhs-shp5-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'copy_lhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([1.4804, 1.6692, 1.4475, 1.2569, 1.9885, 1.0378, 1.1180, 1.7455, 1.7528, 1.2320, 1.3078, 1.5787, 1.5410...1153, 1.2060, 1.6675, 1.0087, 1.3364, 1.1109, 1.1755, 1.4941, 1.1814], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.4830, 1.6412, 1.4840, 1.3205, 1.7716, 1.3604, 1.5086, 1.7159, 1.1792, 1.9845, 1.2988, 1.6342, 1.7570...2785, 1.4247, 1.9105, 1.5178, 1.4165, 1.4882, 1.1144, 1.6186, 1.9452], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: copy_lhs, reduce func: sum) ___________________ test_spmm[idtype0-sum-copy_lhs-shp5-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'copy_lhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'sum' lhs_data = tensor([1.2539, 1.2497, 1.9109, 1.7717, 1.5408, 1.9682, 1.0594, 1.2744, 1.2356, 1.4819, 1.7918, 1.7128, 1.0037...8494, 1.5952, 1.0836, 1.7642, 1.0113, 1.4698, 1.7854, 1.3585, 1.6025], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.0103, 1.1843, 1.1112, 1.4352, 1.1494, 1.2022, 1.3400, 1.4542, 1.7155, 1.7699, 1.5144, 1.5511, 1.7937...2854, 1.2491, 1.4714, 1.2622, 1.8843, 1.5420, 1.9357, 1.8957, 1.5234], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: copy_lhs, reduce func: sum) ___________________ test_spmm[idtype0-sum-copy_rhs-shp0-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_rhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum' lhs_data = tensor([[[[[[1.9372], [1.5062], [1.7634]]], [[[1.5915], [1.7052], ...5]]], [[[1.9581], [1.8569], [1.6066]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.1487]], [[1.2994]], [[1.4454]]]], [[[[1.0139]], [[1.875... [[[[1.6152]], [[1.5851]], [[1.4825]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
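Since the commit under test is titled "fix for pytorch < 1.12", the repair presumably lives around this fallback binding. Two plausible shapes for it, offered as sketches under that assumption and not as the PR's actual diff:

    # (a) let the no-op fallback absorb whatever the real autocast accepts
    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):  # ignore enabled=, dtype=, etc.
        yield

    # (b) or bind the real autocast only where the newer call style is safe,
    # and enter the fallback with no arguments on older PyTorch builds
    import torch

    TORCH_GE_112 = tuple(int(v) for v in torch.__version__.split(".")[:2]) >= (1, 12)
    if TORCH_GE_112:
        from torch.cuda.amp import autocast  # real autocast, accepts enabled=
    else:
        autocast = empty_context

Either way, the call site has to agree with the binding: `with autocast(enabled=False):` is only valid when the object bound to autocast can take that keyword.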
___________________ test_spmm[idtype0-sum-copy_rhs-shp0-g1] ____________________
u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype0-sum-copy_rhs-shp1-g0] ____________________
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype0-sum-copy_rhs-shp1-g1] ____________________
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype0-sum-copy_rhs-shp2-g0] ____________________
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype0-sum-copy_rhs-shp2-g1] ____________________
u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype0-sum-copy_rhs-shp3-g0] ____________________
u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype0-sum-copy_rhs-shp3-g1] ____________________
u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype0-sum-copy_rhs-shp4-g0] ____________________
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype0-sum-copy_rhs-shp4-g1] ____________________
u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype0-sum-copy_rhs-shp5-g0] ____________________
u shape: torch.Size([30]), e shape: torch.Size([100])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype0-sum-copy_rhs-shp5-g1] ____________________
u shape: torch.Size([30]), e shape: torch.Size([300])
SpMM(message func: copy_rhs, reduce func: sum)
______________________ test_spmm[idtype0-min-add-shp0-g0] ______________________
u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1])
SpMM(message func: add, reduce func: min)
______________________ test_spmm[idtype0-min-add-shp0-g1] ______________________
u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1])
SpMM(message func: add, reduce func: min)
______________________ test_spmm[idtype0-min-add-shp1-g0] ______________________

idtype = torch.int32
g = Graph(num_nodes=30, num_edges=100,
      ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)}
      edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)})
shp = ((3, 3), (1, 3)), msg = 'add', reducer = 'min'

    [test body and call chain identical to test_spmm[idtype0-sum-copy_lhs-shp0-g0] above]

gidx =
op = 'add', reduce_op = 'min'
lhs_data = tensor([[[1.5435, 1.5837, 1.7738], [1.2792, 1.6432, 1.0881],
    [1.4289, 1.8463, 1.2535]], [[1...6749], [1.1597, 1.8680, 1.6745],
    [1.9518, 1.2500, 1.9831]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[1.5431, 1.7479, 1.9989]], [[1.7546, 1.4159, 1.1350]],
    [[1.8649, 1.0416, 1.7173]], ...]], [[1.4174, 1.9016, 1.6696]],
    [[1.9209, 1.9884, 1.7468]]], dtype=torch.float64, requires_grad=True)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype0-min-add-shp1-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[[1.4407, 1.8599, 1.1724], [1.3676, 1.7848, 1.8312], [1.5670, 1.7303, 1.1400]], [[1...0320], [1.4921, 1.8360, 1.8549], [1.2586, 1.8199, 1.7974]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.6271, 1.4883, 1.6088]], [[1.4620, 1.3887, 1.6374]], [[1.1160, 1.8566, 1.6175]], ...]], [[1.1234, 1.0635, 1.4670]], [[1.9441, 1.8978, 1.2791]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype0-min-add-shp2-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.9701], [1.6144], [1.6555], [1.3278], [1.8142], [1.6918], [1...681], [1.3357], [1.7781], [1.1575], [1.7445]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8908, 1.0547, 1.1668], [1.5015, 1.8768, 1.0935], [1.9803, 1.1676, 1.4444], [1.8894,... 1.7807], [1.2352, 1.1157, 1.8739], [1.8211, 1.9524, 1.3333]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype0-min-add-shp2-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.2207], [1.5799], [1.9575], [1.2226], [1.3904], [1.3452], [1...205], [1.3084], [1.9328], [1.9173], [1.5066]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5864, 1.6932, 1.1702], [1.7157, 1.5332, 1.6856], [1.4493, 1.1798, 1.5199], [1.4583,... 1.3053], [1.8092, 1.5761, 1.0460], [1.1014, 1.3297, 1.0413]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype0-min-add-shp3-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.4218, 1.6684, 1.9967], [1.4755, 1.4515, 1.0504], [1.8077, 1.8866, 1.3000], [1.1277,... 1.3125], [1.0462, 1.6108, 1.2255], [1.7605, 1.4075, 1.5006]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3694], [1.1933], [1.9002], [1.1960], [1.1058], [1.9496], [1...269], [1.6318], [1.8448], [1.4308], [1.7821]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype0-min-add-shp3-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.3533, 1.4449, 1.0412], [1.1458, 1.5488, 1.5492], [1.2461, 1.8352, 1.0107], [1.0541,... 1.7122], [1.7265, 1.6297, 1.1646], [1.3310, 1.7517, 1.7742]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8748], [1.1020], [1.5378], [1.0238], [1.9516], [1.4934], [1...725], [1.4555], [1.3058], [1.7346], [1.8286]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype0-min-add-shp4-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.6326], [1.4979], [1.5020], [1.8831], [1.0286], [1.0454], [1...594], [1.3666], [1.2028], [1.6072], [1.1485]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.2220], [1.9500], [1.4927], [1.0658], [1.1785], [1.6288], [1...541], [1.2058], [1.7027], [1.6747], [1.4677]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype0-min-add-shp4-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.8015], [1.3381], [1.1840], [1.9815], [1.7479], [1.6291], [1...168], [1.4647], [1.3511], [1.6023], [1.8381]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.0278], [1.0715], [1.0467], [1.0356], [1.4242], [1.0124], [1...805], [1.6877], [1.4760], [1.8783], [1.5676]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype0-min-add-shp5-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([1.1008, 1.0523, 1.6248, 1.7496, 1.9695, 1.6037, 1.7050, 1.1929, 1.9138, 1.7469, 1.3544, 1.3188, 1.2597...2484, 1.6488, 1.1484, 1.7387, 1.9176, 1.2203, 1.6183, 1.8329, 1.4124], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.3082, 1.4870, 1.2327, 1.3116, 1.6167, 1.5235, 1.1195, 1.7236, 1.0522, 1.4300, 1.7373, 1.1238, 1.7254...1975, 1.7756, 1.4343, 1.9038, 1.1785, 1.3676, 1.7546, 1.7039, 1.8295], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype0-min-add-shp5-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([1.2745, 1.4446, 1.3047, 1.5860, 1.5518, 1.2751, 1.8491, 1.7185, 1.3725, 1.6673, 1.9251, 1.7839, 1.5065...0298, 1.4580, 1.1515, 1.3048, 1.7336, 1.6770, 1.5970, 1.4968, 1.3797], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.9333, 1.9221, 1.7948, 1.6241, 1.1608, 1.8610, 1.1260, 1.9925, 1.7447, 1.3619, 1.5636, 1.7703, 1.1443...1332, 1.8395, 1.6590, 1.9270, 1.1636, 1.2094, 1.8527, 1.4667, 1.6586], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
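All of these failures share a single root cause. In this environment,
python/dgl/backend/pytorch/sparse.py has bound `autocast` to its
`empty_context` fallback (the real `torch.cuda.amp.autocast` path is only
taken on newer PyTorch), and that fallback rejects the `enabled` keyword that
line 720 passes. A minimal sketch of the failing pattern, assuming a
simplified stand-in for DGL's fallback (only the names `empty_context` and
`autocast` come from the traceback; the fallback body here is illustrative):

    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args):
        # Stand-in for a no-op fallback context manager: it accepts
        # positional arguments only, so any keyword argument raises.
        yield

    autocast = empty_context  # what the backend uses when AMP is unavailable

    # Reproduces the error seen above:
    # TypeError: empty_context() got an unexpected keyword argument 'enabled'
    with autocast(enabled=False):
        pass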
______________________ test_spmm[idtype0-min-sub-shp0-g0] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-sub-shp0-g1] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-sub-shp1-g0] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-sub-shp1-g1] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-sub-shp2-g0] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-sub-shp2-g1] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-sub-shp3-g0] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
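Given the PR's commit message ("fix for pytorch < 1.12"), the natural shape of
a repair is to make the fallback tolerate the keyword arguments that the real
`torch.cuda.amp.autocast` accepts, so the `with autocast(enabled=False):` call
site works on every supported PyTorch. A minimal sketch of that idea; this is
not the actual patch in this PR, and the fallback signature is an assumption:

    from contextlib import contextmanager

    try:
        # Present on recent PyTorch; accepts an `enabled` keyword.
        from torch.cuda.amp import autocast
    except ImportError:
        @contextmanager
        def autocast(enabled=True, **kwargs):
            # Fallback for older PyTorch: accept and ignore AMP arguments.
            yield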
______________________ test_spmm[idtype0-min-sub-shp3-g1] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-sub-shp4-g0] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-sub-shp4-g1] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-sub-shp5-g0] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-sub-shp5-g1] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-mul-shp0-g0] ______________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-min-mul-shp0-g1] ______________________

idtype = torch.int32
g = Graph(num_nodes={'_U': 30, '_V': 40},
      num_edges={('_U', '_E', '_V'): 300},
      metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'mul', reducer = 'min'

    @pytest.mark.parametrize('g', graphs)
    @pytest.mark.parametrize('shp', spmm_shapes)
    @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs'])
    @pytest.mark.parametrize('reducer', ['sum', 'min', 'max'])
    @parametrize_idtype
    def test_spmm(idtype, g, shp, msg, reducer):
        g = g.astype(idtype).to(F.ctx())
        print(g)
        print(g.idtype)
        hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1)
        he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1)
        print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he)))
        g.srcdata['x'] = F.attach_grad(F.clone(hu))
        g.edata['w'] = F.attach_grad(F.clone(he))
        print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer))
        u = F.attach_grad(F.clone(hu))
        e = F.attach_grad(F.clone(he))
        with F.record_grad():
>           v = gspmm(g, msg, reducer, u, e)

tests/compute/test_sparse.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'mul', reduce_op = 'min'
lhs_data = tensor([[[[[[1.7606], [1.8144], [1.4749]]], [[[1.2901], [1.9895], ...1]]], [[[1.6079], [1.9016], [1.5327]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.7770]], [[1.0506]], [[1.9817]]]], [[[[1.1020]], [[1.406... [[[[1.5667]], [[1.1099]], [[1.6353]]]]]], dtype=torch.float64, requires_grad=True)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype0-min-mul-shp1-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[1.6401, 1.9532, 1.2558], [1.4173, 1.1467, 1.0517], [1.5136, 1.4918, 1.3467]], [[1...8810], [1.8554, 1.6190, 1.7464], [1.8361, 1.1985, 1.0727]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.4391, 1.2748, 1.8309]], [[1.0521, 1.3079, 1.1467]], [[1.8286, 1.4462, 1.0532]], ...]], [[1.1692, 1.0901, 1.1074]], [[1.6884, 1.4915, 1.8326]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype0-min-mul-shp1-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[1.8555, 1.3776, 1.8515], [1.1993, 1.1307, 1.7017], [1.1510, 1.2103, 1.2269]], [[1...2465], [1.0904, 1.0035, 1.5136], [1.0908, 1.6034, 1.1005]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.2124, 1.4891, 1.8288]], [[1.5399, 1.4555, 1.8356]], [[1.8392, 1.0377, 1.1803]], ...]], [[1.5023, 1.4982, 1.3307]], [[1.8430, 1.9342, 1.5776]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype0-min-mul-shp2-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.4534], [1.2493], [1.3906], [1.9280], [1.3044], [1.6196], [1...406], [1.2074], [1.9935], [1.8287], [1.6576]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.2954, 1.1932, 1.1982], [1.3975, 1.7715, 1.4566], [1.9144, 1.5380, 1.4101], [1.8543,... 1.9252], [1.6589, 1.9409, 1.6025], [1.7075, 1.7750, 1.4058]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype0-min-mul-shp2-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.8577], [1.4468], [1.7156], [1.2442], [1.8403], [1.7911], [1...523], [1.2846], [1.9968], [1.5554], [1.0376]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8526, 1.1920, 1.6067], [1.0452, 1.3839, 1.5136], [1.3186, 1.5975, 1.2141], [1.4315,... 1.5244], [1.4728, 1.0942, 1.1273], [1.7602, 1.5139, 1.9166]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype0-min-mul-shp3-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.2695, 1.4507, 1.9591], [1.7409, 1.9274, 1.2853], [1.0555, 1.3629, 1.8810], [1.3217,... 1.4797], [1.1902, 1.2625, 1.4448], [1.1351, 1.5790, 1.5343]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.4696], [1.5453], [1.6263], [1.5489], [1.2393], [1.2301], [1...252], [1.0971], [1.5067], [1.2049], [1.1038]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype0-min-mul-shp3-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.8630, 1.3216, 1.7894], [1.2093, 1.5291, 1.3904], [1.4678, 1.2692, 1.9398], [1.8948,... 1.5221], [1.5688, 1.8623, 1.2004], [1.3724, 1.4753, 1.5450]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8584], [1.1360], [1.0961], [1.8880], [1.3657], [1.1984], [1...377], [1.8909], [1.3968], [1.9293], [1.3526]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype0-min-mul-shp4-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.0659], [1.2545], [1.7910], [1.5625], [1.4345], [1.6125], [1...695], [1.0737], [1.4050], [1.8741], [1.4402]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.0583], [1.9795], [1.2976], [1.7550], [1.7640], [1.4871], [1...115], [1.6221], [1.1724], [1.3289], [1.5920]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype0-min-mul-shp4-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.6384], [1.3057], [1.8569], [1.2275], [1.9012], [1.0821], [1...514], [1.9201], [1.7979], [1.9199], [1.5082]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3756], [1.4100], [1.5017], [1.2107], [1.1631], [1.7269], [1...765], [1.3175], [1.3241], [1.3228], [1.0044]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype0-min-mul-shp5-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([1.9991, 1.7531, 1.2939, 1.8333, 1.4182, 1.0288, 1.4599, 1.1317, 1.1531, 1.1563, 1.3830, 1.0487, 1.4476...5815, 1.0442, 1.0620, 1.5130, 1.3501, 1.9202, 1.8254, 1.5714, 1.2011], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.9746, 1.9457, 1.4082, 1.2389, 1.7288, 1.3519, 1.7175, 1.9306, 1.0590, 1.5110, 1.4003, 1.0192, 1.3998...7307, 1.5935, 1.8833, 1.0861, 1.0037, 1.9302, 1.1350, 1.1015, 1.8643], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype0-min-mul-shp5-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([1.5561, 1.0071, 1.0717, 1.0897, 1.5560, 1.1646, 1.1865, 1.3131, 1.8647, 1.0211, 1.2740, 1.9726, 1.0695...5843, 1.5390, 1.5151, 1.6871, 1.3024, 1.9913, 1.3063, 1.4451, 1.8041], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.1942, 1.4060, 1.9566, 1.5567, 1.1590, 1.4907, 1.1329, 1.0052, 1.7790, 1.1886, 1.6181, 1.2258, 1.2114...8703, 1.6968, 1.0459, 1.8607, 1.1233, 1.9670, 1.1578, 1.4099, 1.7395], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype0-min-div-shp0-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[[[[1.1138], [1.0022], [1.9241]]], [[[1.4423], [1.1856], ...2]]], [[[1.9894], [1.8014], [1.6993]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.6494]], [[0.6875]], [[0.9919]]]], [[[[0.5182]], [[0.986... [[[[0.5131]], [[0.5730]], [[0.8784]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: div, reduce func: min) ______________________ test_spmm[idtype0-min-div-shp0-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[[[[1.0881], [1.5295], [1.6439]]], [[[1.1347], [1.1431], ...7]]], [[[1.4702], [1.0810], [1.5049]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.8315]], [[0.8681]], [[0.5367]]]], [[[[0.5080]], [[0.572... [[[[0.5615]], [[0.5595]], [[0.5683]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: div, reduce func: min) ______________________ test_spmm[idtype0-min-div-shp1-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[1.6326, 1.2039, 1.1745], [1.0549, 1.9031, 1.5341], [1.4877, 1.7274, 1.7217]], [[1...2215], [1.5981, 1.6551, 1.1043], [1.2382, 1.3143, 1.4383]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[0.6554, 0.6850, 0.7219]], [[0.6504, 0.6280, 0.5868]], [[0.5611, 0.5057, 0.8433]], ... [[0.6169, 0.9513, 0.8281]], [[0.9744, 0.6721, 0.9025]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: div, reduce func: min) ______________________ test_spmm[idtype0-min-div-shp1-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[1.7129, 1.3390, 1.9207], [1.2493, 1.3820, 1.1657], [1.4051, 1.6324, 1.2700]], [[1...6488], [1.4589, 1.0403, 1.1572], [1.8744, 1.3302, 1.7853]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[0.5747, 0.7841, 0.6623]], [[0.6335, 0.5194, 0.9935]], [[0.6843, 0.7845, 0.7297]], ... [[0.7136, 0.7043, 0.8721]], [[0.5394, 0.8735, 0.6149]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: div, reduce func: min) ______________________ test_spmm[idtype0-min-div-shp2-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.4762], [1.7209], [1.9148], [1.4821], [1.1799], [1.3826], [1...463], [1.9981], [1.8153], [1.6818], [1.0280]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.7431, 0.7950, 0.5887], [0.7882, 0.6318, 0.5305], [0.7151, 0.5491, 0.8115], [0.5437,...222], [0.5374, 0.5333, 0.7308], [0.8079, 0.6935, 0.5146]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: div, reduce func: min) ______________________ test_spmm[idtype0-min-div-shp2-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.8446], [1.2595], [1.5330], [1.0019], [1.8563], [1.3785], [1...470], [1.8698], [1.1476], [1.1656], [1.7829]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.9706, 0.5676, 0.6208], [0.5998, 0.6292, 0.8583], [0.9958, 0.9696, 0.8232], [0.5306,...830], [0.7192, 0.5214, 0.9936], [0.5726, 0.8819, 0.6156]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: div, reduce func: min) ______________________ test_spmm[idtype0-min-div-shp3-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.3345, 1.0574, 1.3575], [1.2572, 1.1618, 1.2650], [1.3651, 1.4718, 1.6712], [1.7170,... 1.8375], [1.8265, 1.2033, 1.4783], [1.5954, 1.0222, 1.4144]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.7375], [0.5867], [0.5311], [0.8601], [0.9993], [0.8844], [0..., [0.5005], [0.8325], [0.6216], [0.5055]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: div, reduce func: min) ______________________ test_spmm[idtype0-min-div-shp3-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.0291, 1.6080, 1.5929], [1.6941, 1.4055, 1.6903], [1.7402, 1.5274, 1.0813], [1.8183,... 1.6998], [1.3428, 1.6187, 1.2212], [1.7766, 1.2219, 1.8398]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.5773], [0.6553], [0.8424], [0.5639], [0.5369], [0.8558], [0..., [0.6927], [0.7270], [0.6374], [0.9975]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: div, reduce func: min) ______________________ test_spmm[idtype0-min-div-shp4-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.2958], [1.8718], [1.8766], [1.6618], [1.4314], [1.7738], [1...740], [1.7972], [1.8501], [1.0837], [1.8284]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.9643], [0.6293], [0.5654], [0.7310], [0.6283], [0.5749], [0..., [0.5406], [0.5780], [0.5099], [0.7606]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: div, reduce func: min) ______________________ test_spmm[idtype0-min-div-shp4-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.0724], [1.1543], [1.0248], [1.1978], [1.1577], [1.6484], [1...950], [1.8805], [1.0211], [1.1439], [1.9464]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.5079], [0.8889], [0.6705], [0.8538], [0.8992], [0.5584], [0..., [0.8948], [0.7090], [0.7816], [0.6349]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
______________________ test_spmm[idtype0-min-div-shp5-g0] ______________________
______________________ test_spmm[idtype0-min-div-shp5-g1] ______________________

The two remaining 'div' parametrizations (shp5 = ((), ()), scalar features) fail
with the identical traceback: gspmm(g, 'div', 'min', u, e) reaches
"with autocast(enabled=False):" at python/dgl/backend/pytorch/sparse.py:720 and
raises TypeError: empty_context() got an unexpected keyword argument 'enabled'.
shp5-g0 runs on the homogeneous graph (u shape [30], e shape [100]) and shp5-g1
on the heterogeneous graph (u shape [30], e shape [300]); they differ from the
failure above only in these parameters and in the randomly generated tensors.
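A side note on the locals in these 'div' failures: the frames show op = 'mul' even
though the tests are parametrized with msg = 'div'. That is gspmm's own rewriting,
visible in the captured source above: 'sub' is lowered to 'add' with a negated rhs
and 'div' to 'mul' with a reciprocal rhs, so the backend only needs add/mul/copy
kernels. A self-contained illustration of that equivalence follows; normalize_op is
an illustrative name, not a DGL API.

    import torch

    def normalize_op(op, rhs_data):
        # Mirror the rewrite shown in the traceback: only 'add', 'mul' and the
        # copy ops ever reach the backend kernels.
        if op == 'sub':
            return 'add', -rhs_data
        if op == 'div':
            return 'mul', 1. / rhs_data
        return op, rhs_data

    lhs = torch.tensor([8.0, 9.0])
    rhs = torch.tensor([2.0, 3.0])
    op, new_rhs = normalize_op('div', rhs)
    assert op == 'mul'
    # lhs / rhs is computed as lhs * (1 / rhs): numerically the same result.
    assert torch.allclose(lhs * new_rhs, lhs / rhs)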
The same TypeError, with an otherwise identical traceback through
tests/compute/test_sparse.py:120, python/dgl/ops/spmm.py:77 and
python/dgl/backend/pytorch/sparse.py:720, is raised for every 'copy_lhs' and
'copy_rhs' parametrization with the 'min' reducer. The cases differ only in the
graph (g0: homogeneous, 30 nodes / 100 edges; g1: heterogeneous, 30 '_U' and
40 '_V' nodes, 300 edges), the feature shapes below, and the randomly generated
tensors:

    test_spmm[idtype0-min-copy_lhs-shp0-g0]  u [30, 1, 2, 1, 3, 1]  e [100, 4, 1, 3, 1, 1]
    test_spmm[idtype0-min-copy_lhs-shp0-g1]  u [30, 1, 2, 1, 3, 1]  e [300, 4, 1, 3, 1, 1]
    test_spmm[idtype0-min-copy_lhs-shp1-g0]  u [30, 3, 3]           e [100, 1, 3]
    test_spmm[idtype0-min-copy_lhs-shp1-g1]  u [30, 3, 3]           e [300, 1, 3]
    test_spmm[idtype0-min-copy_lhs-shp2-g0]  u [30, 1]              e [100, 3]
    test_spmm[idtype0-min-copy_lhs-shp2-g1]  u [30, 1]              e [300, 3]
    test_spmm[idtype0-min-copy_lhs-shp3-g0]  u [30, 3]              e [100, 1]
    test_spmm[idtype0-min-copy_lhs-shp3-g1]  u [30, 3]              e [300, 1]
    test_spmm[idtype0-min-copy_lhs-shp4-g0]  u [30, 1]              e [100, 1]
    test_spmm[idtype0-min-copy_lhs-shp4-g1]  u [30, 1]              e [300, 1]
    test_spmm[idtype0-min-copy_lhs-shp5-g0]  u [30]                 e [100]
    test_spmm[idtype0-min-copy_lhs-shp5-g1]  u [30]                 e [300]
    test_spmm[idtype0-min-copy_rhs-shp0-g0]  u [30, 1, 2, 1, 3, 1]  e [100, 4, 1, 3, 1, 1]
    test_spmm[idtype0-min-copy_rhs-shp0-g1]  u [30, 1, 2, 1, 3, 1]  e [300, 4, 1, 3, 1, 1]
    test_spmm[idtype0-min-copy_rhs-shp1-g0]  u [30, 3, 3]           e [100, 1, 3]
    test_spmm[idtype0-min-copy_rhs-shp1-g1]  u [30, 3, 3]           e [300, 1, 3]
    test_spmm[idtype0-min-copy_rhs-shp2-g0]  u [30, 1]              e [100, 3]
    test_spmm[idtype0-min-copy_rhs-shp2-g1]  u [30, 1]              e [300, 3]
    test_spmm[idtype0-min-copy_rhs-shp3-g0]  u [30, 3]              e [100, 1]
    test_spmm[idtype0-min-copy_rhs-shp3-g1]  u [30, 3]              e [300, 1]
    test_spmm[idtype0-min-copy_rhs-shp4-g0]  u [30, 1]              e [100, 1]
    test_spmm[idtype0-min-copy_rhs-shp4-g1]  u [30, 1]              e [300, 1]
    test_spmm[idtype0-min-copy_rhs-shp5-g0]  u [30]                 e [100]

___________________ test_spmm[idtype0-min-copy_rhs-shp5-g1] ____________________

idtype = torch.int32
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300},
      metagraph=[('_U', '_V', '_E')])
shp = ((), ()), msg = 'copy_rhs', reducer = 'min'

    [test body identical to the test_spmm source shown above]
        with F.record_grad():
>           v = gspmm(g, msg, reducer, u, e)

tests/compute/test_sparse.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <dgl.heterograph_index.HeteroGraphIndex object at 0x...>
op = 'copy_rhs', reduce_op = 'min'
lhs_data = tensor([1.9295, 1.2920, 1.6236, 1.1593, 1.7224, ..., 1.0162, 1.3150, 1.1554],
                  dtype=torch.float64, requires_grad=True)
rhs_data = tensor([1.6416, 1.5811, 1.6590, 1.6119, 1.9101, ..., 1.3298, 1.8272, 1.5288],
                  dtype=torch.float64, requires_grad=True)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: copy_rhs, reduce func: min) ______________________ test_spmm[idtype0-max-add-shp0-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[[[[[1.6043], [1.6876], [1.6699]]], [[[1.8457], [1.6440], ...4]]], [[[1.3541], [1.6723], [1.7950]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.4516]], [[1.0661]], [[1.1743]]]], [[[[1.2535]], [[1.197... [[[[1.1510]], [[1.0158]], [[1.3630]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-add-shp0-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[[[[[1.3413], [1.8904], [1.4366]]], [[[1.7681], [1.8938], ...1]]], [[[1.1234], [1.6714], [1.8472]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.3815]], [[1.7025]], [[1.7428]]]], [[[[1.4077]], [[1.033... [[[[1.9601]], [[1.1381]], [[1.3480]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-add-shp1-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[[1.0281, 1.2349, 1.8988], [1.6012, 1.0631, 1.0675], [1.4586, 1.1925, 1.5404]], [[1...9137], [1.2043, 1.6751, 1.9481], [1.7065, 1.2178, 1.5582]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.4874, 1.9086, 1.9045]], [[1.0983, 1.6715, 1.4013]], [[1.4935, 1.3226, 1.9880]], ...]], [[1.1885, 1.3207, 1.9555]], [[1.6941, 1.7552, 1.7328]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-add-shp1-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[[1.2563, 1.4003, 1.6367], [1.3839, 1.6973, 1.3616], [1.8952, 1.2297, 1.5386]], [[1...0983], [1.2578, 1.2060, 1.4956], [1.9623, 1.3857, 1.1778]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.5351, 1.2870, 1.1302]], [[1.7773, 1.9411, 1.4194]], [[1.0009, 1.5971, 1.5210]], ...]], [[1.4130, 1.1265, 1.3854]], [[1.7044, 1.3819, 1.6447]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-add-shp2-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.2739], [1.9907], [1.7108], [1.4494], [1.7854], [1.3175], [1...781], [1.5551], [1.9174], [1.6490], [1.8310]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5755, 1.8684, 1.4957], [1.7181, 1.8645, 1.4433], [1.0523, 1.2875, 1.9412], [1.4421,... 1.4753], [1.6464, 1.4678, 1.9822], [1.4903, 1.5819, 1.1340]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-add-shp2-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.4273], [1.2721], [1.6440], [1.1935], [1.3824], [1.2633], [1...130], [1.9070], [1.4969], [1.3015], [1.6641]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7214, 1.7241, 1.4306], [1.9133, 1.6394, 1.7716], [1.4056, 1.3562, 1.1700], [1.6935,... 1.6851], [1.0095, 1.3532, 1.5881], [1.6983, 1.7902, 1.7770]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-add-shp3-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.3216, 1.9615, 1.3173], [1.8955, 1.8481, 1.8115], [1.6751, 1.5220, 1.7750], [1.7419,... 1.6787], [1.6633, 1.1499, 1.7859], [1.7906, 1.8313, 1.9635]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8106], [1.3868], [1.1465], [1.0231], [1.3065], [1.1119], [1...812], [1.3021], [1.2004], [1.5025], [1.8443]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-add-shp3-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.5217, 1.3594, 1.7827], [1.7802, 1.5787, 1.0712], [1.6978, 1.7020, 1.3062], [1.1989,... 1.2604], [1.0105, 1.8433, 1.9876], [1.6258, 1.8054, 1.5159]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.2166], [1.5300], [1.1877], [1.6076], [1.4640], [1.3451], [1...825], [1.2531], [1.5587], [1.4323], [1.9294]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-add-shp4-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.7328], [1.0308], [1.0721], [1.2534], [1.2157], [1.9000], [1...953], [1.9884], [1.7190], [1.9095], [1.3094]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9688], [1.4122], [1.7085], [1.0934], [1.6539], [1.5382], [1...892], [1.1639], [1.6475], [1.1266], [1.9194]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-add-shp4-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.8163], [1.9563], [1.9519], [1.0179], [1.0182], [1.3515], [1...282], [1.3424], [1.7160], [1.2056], [1.4275]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3545], [1.2097], [1.7338], [1.9635], [1.5591], [1.4940], [1...371], [1.2051], [1.1214], [1.3412], [1.4935]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-add-shp5-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([1.5412, 1.7817, 1.8495, 1.0589, 1.8504, 1.0300, 1.2370, 1.1027, 1.1399, 1.7765, 1.8337, 1.7660, 1.1554...7215, 1.0784, 1.5627, 1.8300, 1.9660, 1.7088, 1.4339, 1.8865, 1.3138], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.4495, 1.2577, 1.0542, 1.0880, 1.2047, 1.8762, 1.6219, 1.1892, 1.9954, 1.1483, 1.5179, 1.7609, 1.3176...9331, 1.7277, 1.0170, 1.1251, 1.4778, 1.1254, 1.7598, 1.4081, 1.8603], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-add-shp5-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'add', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([1.4063, 1.3830, 1.4981, 1.7725, 1.4007, 1.5434, 1.5645, 1.5404, 1.3260, 1.4118, 1.3630, 1.1936, 1.1949...6332, 1.2509, 1.3742, 1.0020, 1.6533, 1.8552, 1.6636, 1.3759, 1.7270], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.1042, 1.0551, 1.3690, 1.4470, 1.5422, 1.7476, 1.4803, 1.1073, 1.2068, 1.3859, 1.5582, 1.8621, 1.7468...6782, 1.6427, 1.9378, 1.6333, 1.8549, 1.4233, 1.0341, 1.0981, 1.3032], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: add, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp0-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[[[[[1.2712], [1.4794], [1.6621]]], [[[1.8743], [1.7420], ...8]]], [[[1.1658], [1.7488], [1.2261]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.3494]], [[-1.2876]], [[-1.0105]]]], [[[[-1.0053]], [[-... [[[[-1.5635]], [[-1.8458]], [[-1.4316]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp0-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[[[[[1.9185], [1.7220], [1.9158]]], [[[1.0820], [1.6220], ...2]]], [[[1.2471], [1.3585], [1.9873]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.9148]], [[-1.3888]], [[-1.3484]]]], [[[[-1.5951]], [[-... [[[[-1.4918]], [[-1.2524]], [[-1.3086]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp1-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[[1.5875, 1.6568, 1.4121], [1.0362, 1.4537, 1.9956], [1.4592, 1.6266, 1.9625]], [[1...0990], [1.4022, 1.6478, 1.0537], [1.5993, 1.3719, 1.0393]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[-1.0443, -1.7883, -1.8467]], [[-1.2268, -1.0067, -1.2243]], [[-1.4635, -1.4763, -1.5797]],...1.5758, -1.2245, -1.3437]], [[-1.0822, -1.2541, -1.7349]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp1-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[[1.7851, 1.0406, 1.5278], [1.1075, 1.1988, 1.9011], [1.9069, 1.7544, 1.1572]], [[1...0556], [1.8551, 1.3292, 1.9384], [1.5410, 1.5350, 1.2063]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[-1.3493, -1.3500, -1.4668]], [[-1.9450, -1.4350, -1.0254]], [[-1.9567, -1.3435, -1.2441]],...1.9397, -1.6907, -1.4869]], [[-1.7284, -1.4853, -1.5539]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp2-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.0144], [1.6239], [1.3819], [1.3991], [1.4652], [1.9368], [1...317], [1.9398], [1.5033], [1.8227], [1.8471]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.2182, -1.2502, -1.3634], [-1.0293, -1.2039, -1.9643], [-1.0753, -1.4201, -1.4781], ... [-1.7728, -1.0712, -1.4941], [-1.3075, -1.2019, -1.8277]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp2-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.3372], [1.2482], [1.7977], [1.6185], [1.0351], [1.8196], [1...420], [1.9152], [1.6929], [1.9283], [1.3846]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.3921, -1.4249, -1.4012], [-1.5084, -1.1827, -1.8158], [-1.4739, -1.1013, -1.1790], ... [-1.8358, -1.5271, -1.1155], [-1.8576, -1.3404, -1.7350]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp3-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.7150, 1.9349, 1.1802], [1.4885, 1.0165, 1.8286], [1.5390, 1.9419, 1.6723], [1.5401,... 1.2907], [1.3154, 1.7509, 1.3359], [1.4569, 1.0246, 1.3703]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.4252], [-1.1983], [-1.8137], [-1.3008], [-1.8251], [-1.9239], ... [-1.7677], [-1.9888], [-1.0225], [-1.3400]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp3-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.3990, 1.1509, 1.2573], [1.3798, 1.2323, 1.1532], [1.4858, 1.5461, 1.8151], [1.4374,... 1.4095], [1.8179, 1.2997, 1.1341], [1.9947, 1.4188, 1.2126]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.3901], [-1.2037], [-1.1142], [-1.0586], [-1.6723], [-1.4457], ... [-1.6002], [-1.4959], [-1.4474], [-1.0033]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp4-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.9320], [1.0766], [1.2509], [1.9864], [1.7195], [1.2525], [1...033], [1.9184], [1.9789], [1.4373], [1.6461]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.9965], [-1.5284], [-1.2159], [-1.5186], [-1.6239], [-1.0104], ... [-1.1538], [-1.3720], [-1.8042], [-1.9574]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp4-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.7155], [1.4480], [1.7019], [1.6417], [1.6891], [1.6751], [1...633], [1.8876], [1.8737], [1.3142], [1.8154]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.1052], [-1.9650], [-1.4293], [-1.6492], [-1.0204], [-1.1236], ... [-1.4364], [-1.1503], [-1.7635], [-1.7693]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp5-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([1.5175, 1.1506, 1.0445, 1.1496, 1.5334, 1.6856, 1.4515, 1.2659, 1.7874, 1.1435, 1.3699, 1.4695, 1.0419...8641, 1.6705, 1.0787, 1.3841, 1.2191, 1.9608, 1.6233, 1.8435, 1.6743], dtype=torch.float64, requires_grad=True) rhs_data = tensor([-1.7569, -1.4238, -1.7704, -1.3059, -1.9850, -1.1502, -1.2466, -1.7237, -1.1774, -1.3277, -1.4171, -1....8511, -1.6954, -1.4308, -1.5712, -1.3378, -1.7359, -1.7962], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-sub-shp5-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([1.5633, 1.7182, 1.2122, 1.4843, 1.6634, 1.1849, 1.4289, 1.8970, 1.3097, 1.6854, 1.3761, 1.7545, 1.4253...8295, 1.2332, 1.0882, 1.2984, 1.3948, 1.7682, 1.7613, 1.1876, 1.4397], dtype=torch.float64, requires_grad=True) rhs_data = tensor([-1.4771, -1.3277, -1.0077, -1.4428, -1.5778, -1.9583, -1.9517, -1.5341, -1.3671, -1.6054, -1.2842, -1....7387, -1.1167, -1.5986, -1.6349, -1.4961, -1.0087, -1.4304], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp0-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[[[[1.3384], [1.3777], [1.8374]]], [[[1.7099], [1.5344], ...3]]], [[[1.1507], [1.7018], [1.6464]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.5020]], [[1.2979]], [[1.3698]]]], [[[[1.1362]], [[1.281... [[[[1.9521]], [[1.3032]], [[1.6398]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp0-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[[[[1.3364], [1.1958], [1.3392]]], [[[1.3041], [1.5241], ...7]]], [[[1.8621], [1.8137], [1.7288]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.9704]], [[1.1453]], [[1.2892]]]], [[[[1.2404]], [[1.060... [[[[1.4362]], [[1.4576]], [[1.2926]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp1-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[1.4843, 1.0222, 1.4591], [1.4326, 1.6433, 1.4465], [1.8117, 1.6602, 1.5397]], [[1...1394], [1.3471, 1.8673, 1.5068], [1.8070, 1.6199, 1.1492]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.7921, 1.4512, 1.7257]], [[1.1861, 1.0018, 1.7530]], [[1.7835, 1.2899, 1.6118]], ...]], [[1.3556, 1.8269, 1.0807]], [[1.5538, 1.4740, 1.8244]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp1-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[1.0052, 1.2981, 1.9887], [1.9113, 1.8018, 1.4297], [1.4276, 1.2351, 1.1197]], [[1...8075], [1.4351, 1.7341, 1.2152], [1.4994, 1.9150, 1.5186]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.9542, 1.6503, 1.7380]], [[1.4694, 1.5248, 1.3434]], [[1.3053, 1.4629, 1.7323]], ...]], [[1.7833, 1.3262, 1.7846]], [[1.0873, 1.8522, 1.3156]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp2-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.5116], [1.0514], [1.0876], [1.0077], [1.5933], [1.1446], [1...406], [1.1226], [1.2962], [1.9754], [1.0542]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7558, 1.0754, 1.2241], [1.2976, 1.6328, 1.7057], [1.0352, 1.7697, 1.5591], [1.5782,... 1.1229], [1.9557, 1.5998, 1.2629], [1.0345, 1.1396, 1.8267]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp2-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.5867], [1.5849], [1.2086], [1.7655], [1.9723], [1.6365], [1...481], [1.3336], [1.3153], [1.6916], [1.0614]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8403, 1.8126, 1.8392], [1.8783, 1.6265, 1.6571], [1.7354, 1.1338, 1.5931], [1.6463,... 1.8540], [1.1153, 1.8574, 1.1779], [1.2257, 1.5403, 1.9887]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp3-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.1615, 1.2613, 1.6297], [1.0523, 1.6618, 1.5543], [1.7560, 1.3979, 1.7666], [1.9303,... 1.0475], [1.7448, 1.4306, 1.3983], [1.6332, 1.8569, 1.9249]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.2183], [1.6383], [1.9280], [1.8380], [1.5889], [1.9209], [1...020], [1.5064], [1.3640], [1.2880], [1.0348]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp3-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.0310, 1.1184, 1.6185], [1.1680, 1.0299, 1.9023], [1.8661, 1.2108, 1.3288], [1.6900,... 1.4794], [1.6772, 1.6250, 1.2951], [1.0197, 1.7047, 1.6659]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.4846], [1.3366], [1.6799], [1.2955], [1.1397], [1.0478], [1...442], [1.6722], [1.3674], [1.4672], [1.8525]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp4-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.6600], [1.1677], [1.4272], [1.7254], [1.0293], [1.1737], [1...033], [1.0519], [1.8423], [1.6517], [1.3495]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.2099], [1.5100], [1.2586], [1.0733], [1.9432], [1.0222], [1...260], [1.1665], [1.6293], [1.1930], [1.3240]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp4-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.6858], [1.6293], [1.6561], [1.9367], [1.4207], [1.1865], [1...594], [1.9042], [1.5079], [1.4289], [1.4490]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6194], [1.8559], [1.5311], [1.8276], [1.6283], [1.2096], [1...249], [1.6587], [1.4228], [1.8076], [1.4938]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp5-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([1.7390, 1.8184, 1.0022, 1.7023, 1.9772, 1.5539, 1.5687, 1.1383, 1.6269, 1.8598, 1.5472, 1.4398, 1.2368...9801, 1.4493, 1.8109, 1.7794, 1.1311, 1.3044, 1.1038, 1.2230, 1.7086], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.0173, 1.7250, 1.9276, 1.1970, 1.0850, 1.1061, 1.5007, 1.1779, 1.2405, 1.1464, 1.9379, 1.7185, 1.2322...9375, 1.9161, 1.7018, 1.2650, 1.3835, 1.2762, 1.7100, 1.3665, 1.8027], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype0-max-mul-shp5-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([1.7283, 1.1122, 1.7515, 1.5091, 1.0567, 1.8180, 1.6410, 1.6929, 1.4745, 1.9529, 1.6848, 1.4201, 1.2018...9104, 1.4461, 1.0315, 1.2788, 1.9321, 1.3439, 1.1456, 1.2659, 1.5925], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.8752, 1.7782, 1.3396, 1.9926, 1.2828, 1.6160, 1.7757, 1.2260, 1.9685, 1.6916, 1.7629, 1.0831, 1.4620...1576, 1.3390, 1.0072, 1.2470, 1.0805, 1.3343, 1.5332, 1.9645, 1.8619], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
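All of the failures above are one crash repeated across parametrizations: `gspmm` reaches `with autocast(enabled=False):` at python/dgl/backend/pytorch/sparse.py:720, but in this environment `autocast` is bound to a fallback context manager that accepts no keyword arguments, so the call raises before any SpMM work happens. A minimal self-contained sketch of that failure mode follows; the `empty_context` here is an illustrative stand-in suggested by the error message, not DGL's actual definition:

```python
from contextlib import contextmanager

# Illustrative stand-in: a no-op fallback context manager that takes
# no keyword arguments, as the TypeError message implies.
@contextmanager
def empty_context(*args):
    yield

autocast = empty_context  # the binding the backend appears to fall back to

try:
    with autocast(enabled=False):  # same call shape as sparse.py:720
        pass
except TypeError as err:
    # Prints: empty_context() got an unexpected keyword argument 'enabled'
    print(err)
```

Because the crash happens while entering the context manager, it is independent of the message function, reducer, and feature shapes, which is why every remaining test_spmm case in this run fails with the identical message.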
______________________ test_spmm[idtype0-max-mul-shp5-g1] ______________________
idtype = torch.int32
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((), ()), msg = 'mul', reducer = 'max'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-max-div-shp0-g0] ______________________
idtype = torch.int32
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'div', reducer = 'max'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-max-div-shp0-g1] ______________________
idtype = torch.int32
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'div', reducer = 'max'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-max-div-shp1-g0] ______________________
idtype = torch.int32
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)})
shp = ((3, 3), (1, 3)), msg = 'div', reducer = 'max'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-max-div-shp1-g1] ______________________
idtype = torch.int32
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3, 3), (1, 3)), msg = 'div', reducer = 'max'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-max-div-shp2-g0] ______________________
idtype = torch.int32
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)})
shp = ((1,), (3,)), msg = 'div', reducer = 'max'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-max-div-shp2-g1] ______________________
idtype = torch.int32
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (3,)), msg = 'div', reducer = 'max'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-max-div-shp3-g0] ______________________
idtype = torch.int32
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((3,), (1,)), msg = 'div', reducer = 'max'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype0-max-div-shp3-g1] ______________________
idtype = torch.int32
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (1,)), msg = 'div', reducer = 'max'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype0-max-div-shp4-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.6571], [1.8944], [1.6842], [1.1502], [1.0124], [1.7699], [1...949], [1.9638], [1.5713], [1.3112], [1.9936]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.5024], [0.6754], [0.5492], [0.7338], [0.9664], [0.8668], [0..., [0.7718], [0.9472], [0.7042], [0.7292]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype0-max-div-shp5-g0] ______________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([1.5332, 1.5201, 1.4454, 1.1001, 1.2318, 1.5097, 1.0765, 1.2196, 1.6092, 1.7288, 1.9291, 1.5800, 1.7816...7797, 1.2764, 1.3129, 1.6692, 1.1337, 1.2378, 1.3687, 1.4140, 1.4611], dtype=torch.float64, requires_grad=True) rhs_data = tensor([0.9128, 0.9112, 0.8612, 0.5586, 0.8351, 0.5495, 0.9017, 0.5980, 0.5166, 0.7106, 0.9464, 0.7328, 0.6230..., 0.8882, 0.5538, 0.6030, 0.5326, 0.7440, 0.6979, 0.6456, 0.5965], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype0-max-div-shp5-g1] ______________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([1.1185, 1.4804, 1.0801, 1.2872, 1.8984, 1.2138, 1.7913, 1.9121, 1.1607, 1.3808, 1.8768, 1.0997, 1.9325...5409, 1.7809, 1.2608, 1.3507, 1.8783, 1.0274, 1.5886, 1.3949, 1.3027], dtype=torch.float64, requires_grad=True) rhs_data = tensor([0.7827, 0.5207, 0.6622, 0.6201, 0.5258, 0.8746, 0.5414, 0.5161, 0.5927, 0.6058, 0.7551, 0.5707, 0.6253..., 0.5441, 0.8333, 0.7913, 0.9820, 0.5898, 0.7832, 0.6423, 0.7200], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: div, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp0-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[[[[[1.8557], [1.3782], [1.6370]]], [[[1.2109], [1.6080], ...0]]], [[[1.0707], [1.0542], [1.1916]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2547]], [[1.6391]], [[1.3923]]]], [[[[1.7800]], [[1.592... [[[[1.1226]], [[1.7332]], [[1.2772]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp0-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[[[[[1.9290], [1.7372], [1.2465]]], [[[1.0420], [1.1243], ...4]]], [[[1.3352], [1.2140], [1.5078]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2052]], [[1.2812]], [[1.1959]]]], [[[[1.3888]], [[1.679... [[[[1.1693]], [[1.6509]], [[1.3873]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp1-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[[1.9586, 1.4001, 1.0363], [1.3538, 1.0634, 1.2170], [1.6339, 1.3949, 1.8994]], [[1...7975], [1.6327, 1.3844, 1.2411], [1.0113, 1.1761, 1.7143]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.7706, 1.4119, 1.0584]], [[1.2617, 1.6390, 1.4696]], [[1.1904, 1.8668, 1.8117]], ...]], [[1.1825, 1.4651, 1.3179]], [[1.5594, 1.0765, 1.3467]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp1-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[[1.3552, 1.5302, 1.3097], [1.1976, 1.1386, 1.2172], [1.2556, 1.3798, 1.8829]], [[1...3458], [1.3066, 1.3085, 1.8999], [1.8419, 1.4895, 1.4541]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.7019, 1.8894, 1.0857]], [[1.1004, 1.4132, 1.6321]], [[1.8595, 1.2493, 1.9058]], ...]], [[1.5726, 1.9769, 1.8953]], [[1.5859, 1.6568, 1.2652]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp2-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[1.0172], [1.2563], [1.2927], [1.6495], [1.2118], [1.4789], [1...797], [1.0060], [1.9540], [1.5394], [1.0384]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3144, 1.4085, 1.6472], [1.1967, 1.7414, 1.1968], [1.4629, 1.0177, 1.7287], [1.9500,... 1.8202], [1.5742, 1.2605, 1.5702], [1.8549, 1.7707, 1.1461]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp2-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[1.7629], [1.0822], [1.4096], [1.9348], [1.7594], [1.4220], [1...941], [1.7248], [1.3497], [1.0425], [1.6349]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.2233, 1.8076, 1.8601], [1.9226, 1.9567, 1.3510], [1.5600, 1.8173, 1.3451], [1.9922,... 1.2845], [1.5284, 1.6864, 1.9418], [1.4130, 1.2405, 1.9699]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp3-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[1.6936, 1.4210, 1.5262], [1.5869, 1.0020, 1.4415], [1.6106, 1.3497, 1.7641], [1.7250,... 1.8052], [1.7457, 1.5230, 1.6647], [1.3196, 1.1171, 1.7515]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9735], [1.1846], [1.7524], [1.2910], [1.5107], [1.2062], [1...813], [1.7986], [1.0666], [1.3502], [1.8126]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp3-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[1.2447, 1.3793, 1.9509], [1.1258, 1.3346, 1.6841], [1.7169, 1.2138, 1.4740], [1.2883,... 1.0676], [1.1325, 1.6658, 1.8523], [1.5435, 1.2352, 1.6464]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9388], [1.1394], [1.8445], [1.3334], [1.7890], [1.3686], [1...961], [1.2018], [1.6246], [1.9792], [1.4081]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp4-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[1.9547], [1.1594], [1.0984], [1.2676], [1.2246], [1.3607], [1...997], [1.7647], [1.1706], [1.7373], [1.6103]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7115], [1.3302], [1.9348], [1.2353], [1.6989], [1.5143], [1...618], [1.3858], [1.4908], [1.9168], [1.3557]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp4-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[1.8550], [1.9131], [1.2888], [1.7848], [1.6058], [1.1588], [1...925], [1.2656], [1.0036], [1.2322], [1.6880]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7658], [1.3081], [1.1305], [1.3972], [1.6224], [1.2001], [1...569], [1.6520], [1.3972], [1.0894], [1.2386]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp5-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([1.6549, 1.0410, 1.1282, 1.1575, 1.5230, 1.4727, 1.6556, 1.5115, 1.9761, 1.2806, 1.4091, 1.4066, 1.8613...3601, 1.6245, 1.9686, 1.0859, 1.9055, 1.8670, 1.1565, 1.2928, 1.2731], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.0396, 1.7046, 1.4045, 1.7233, 1.3256, 1.1508, 1.9587, 1.5670, 1.7239, 1.4728, 1.2002, 1.7366, 1.4047...5571, 1.3806, 1.9014, 1.0308, 1.9779, 1.3547, 1.6706, 1.8434, 1.0029], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_lhs-shp5-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([1.5548, 1.8828, 1.3486, 1.4642, 1.7283, 1.3010, 1.3972, 1.4866, 1.5815, 1.8775, 1.2983, 1.0779, 1.5725...7269, 1.2946, 1.0091, 1.6211, 1.5468, 1.2387, 1.1386, 1.0587, 1.2180], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.2013, 1.0485, 1.2032, 1.4787, 1.4942, 1.6647, 1.7072, 1.1744, 1.9542, 1.4039, 1.1993, 1.1829, 1.2555...2109, 1.5066, 1.1039, 1.5587, 1.4618, 1.5827, 1.2000, 1.0134, 1.4513], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_rhs-shp0-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_rhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'max' lhs_data = tensor([[[[[[1.7748], [1.4194], [1.6031]]], [[[1.6095], [1.8141], ...2]]], [[[1.1950], [1.5572], [1.8135]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.1939]], [[1.3210]], [[1.4601]]]], [[[[1.9744]], [[1.287... [[[[1.8486]], [[1.9241]], [[1.5460]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: copy_rhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_rhs-shp0-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_rhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'max' lhs_data = tensor([[[[[[1.2859], [1.0838], [1.5940]]], [[[1.4581], [1.2119], ...5]]], [[[1.7423], [1.6716], [1.5827]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.4198]], [[1.3369]], [[1.5251]]]], [[[[1.0402]], [[1.854... [[[[1.7444]], [[1.9230]], [[1.2698]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: copy_rhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_rhs-shp1-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'copy_rhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'max' lhs_data = tensor([[[1.3714, 1.4014, 1.9761], [1.9993, 1.5537, 1.6914], [1.0573, 1.3395, 1.7001]], [[1...8216], [1.0425, 1.6170, 1.4352], [1.4921, 1.6984, 1.6449]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.8329, 1.8482, 1.0133]], [[1.4595, 1.6175, 1.4524]], [[1.0651, 1.9387, 1.2262]], ...]], [[1.9083, 1.2817, 1.7250]], [[1.0961, 1.2242, 1.3015]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: copy_rhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_rhs-shp1-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'copy_rhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'max' lhs_data = tensor([[[1.2709, 1.5212, 1.7502], [1.0007, 1.1331, 1.8916], [1.4736, 1.8831, 1.4634]], [[1...5446], [1.7907, 1.5652, 1.3110], [1.2057, 1.7891, 1.5762]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.1248, 1.1289, 1.1253]], [[1.8959, 1.6283, 1.4443]], [[1.5553, 1.0877, 1.5808]], ...]], [[1.9308, 1.7482, 1.6999]], [[1.2333, 1.2498, 1.4930]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: copy_rhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_rhs-shp2-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'copy_rhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'max' lhs_data = tensor([[1.2867], [1.6235], [1.5133], [1.6254], [1.9541], [1.3302], [1...589], [1.7100], [1.0595], [1.5173], [1.8566]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.4095, 1.6785, 1.4640], [1.5784, 1.4875, 1.7035], [1.7531, 1.5251, 1.6965], [1.1804,... 1.2584], [1.8459, 1.0736, 1.7982], [1.7326, 1.5845, 1.0106]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: copy_rhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_rhs-shp2-g1] ____________________ idtype = torch.int32 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'copy_rhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'max' lhs_data = tensor([[1.3267], [1.3109], [1.4414], [1.4565], [1.3597], [1.5789], [1...649], [1.9799], [1.6551], [1.9962], [1.7384]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3601, 1.3408, 1.7471], [1.3631, 1.9631, 1.7610], [1.0283, 1.4686, 1.7756], [1.2092,... 1.8092], [1.9578, 1.4974, 1.9805], [1.7869, 1.3296, 1.0686]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: copy_rhs, reduce func: max) ___________________ test_spmm[idtype0-max-copy_rhs-shp3-g0] ____________________ idtype = torch.int32 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'copy_rhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'max' lhs_data = tensor([[1.1632, 1.6662, 1.3166], [1.3862, 1.6820, 1.6806], [1.8191, 1.3018, 1.2840], [1.6399,... 1.0092], [1.8077, 1.1123, 1.8079], [1.2413, 1.0060, 1.4218]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8514], [1.4549], [1.4747], [1.9970], [1.1620], [1.2223], [1...396], [1.3946], [1.3854], [1.9016], [1.7262]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
[25 further test_spmm parametrizations fail identically (TypeError:
empty_context() got an unexpected keyword argument 'enabled', raised at
python/dgl/backend/pytorch/sparse.py:720); their per-test output, which
differs only in the random input tensors, is omitted:]
    test_spmm[idtype0-max-copy_rhs-shp3-g0]
    test_spmm[idtype0-max-copy_rhs-shp3-g1]
    test_spmm[idtype0-max-copy_rhs-shp4-g0]
    test_spmm[idtype0-max-copy_rhs-shp4-g1]
    test_spmm[idtype0-max-copy_rhs-shp5-g0]
    test_spmm[idtype0-max-copy_rhs-shp5-g1]
    test_spmm[idtype1-sum-add-shp0-g0]
    test_spmm[idtype1-sum-add-shp0-g1]
    test_spmm[idtype1-sum-add-shp1-g0]
    test_spmm[idtype1-sum-add-shp1-g1]
    test_spmm[idtype1-sum-add-shp2-g0]
    test_spmm[idtype1-sum-add-shp2-g1]
    test_spmm[idtype1-sum-add-shp3-g0]
    test_spmm[idtype1-sum-add-shp3-g1]
    test_spmm[idtype1-sum-add-shp4-g0]
    test_spmm[idtype1-sum-add-shp4-g1]
    test_spmm[idtype1-sum-add-shp5-g0]
    test_spmm[idtype1-sum-add-shp5-g1]
    test_spmm[idtype1-sum-sub-shp0-g0]
    test_spmm[idtype1-sum-sub-shp0-g1]
    test_spmm[idtype1-sum-sub-shp1-g0]
    test_spmm[idtype1-sum-sub-shp1-g1]
    test_spmm[idtype1-sum-sub-shp2-g0]
    test_spmm[idtype1-sum-sub-shp2-g1]
    test_spmm[idtype1-sum-sub-shp3-g0]
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype1-sum-sub-shp3-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.9437, 1.3265, 1.9900], [1.1226, 1.3537, 1.6065], [1.3489, 1.9140, 1.8898], [1.6430,... 1.6326], [1.5264, 1.5909, 1.2813], [1.3065, 1.5928, 1.0477]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.1767], [-1.5158], [-1.6504], [-1.7435], [-1.2121], [-1.1788], ... [-1.0987], [-1.1166], [-1.7169], [-1.1293]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype1-sum-sub-shp4-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.1972], [1.0537], [1.5912], [1.5107], [1.4551], [1.7715], [1...426], [1.0088], [1.6175], [1.0794], [1.3400]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.6006], [-1.5744], [-1.6139], [-1.1359], [-1.4277], [-1.7456], ... [-1.6696], [-1.9944], [-1.3820], [-1.4215]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype1-sum-sub-shp4-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([[1.1355], [1.0976], [1.6875], [1.5762], [1.2718], [1.6151], [1...787], [1.0494], [1.0230], [1.4540], [1.2801]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.4029], [-1.0429], [-1.8518], [-1.9069], [-1.4328], [-1.1749], ... [-1.6455], [-1.2488], [-1.8961], [-1.4914]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype1-sum-sub-shp5-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([1.2712, 1.0310, 1.3699, 1.0337, 1.6787, 1.3711, 1.3328, 1.9993, 1.6272, 1.7678, 1.7575, 1.7615, 1.1168...3504, 1.4146, 1.6309, 1.4586, 1.7031, 1.5621, 1.8300, 1.0398, 1.2227], dtype=torch.float64, requires_grad=True) rhs_data = tensor([-1.2878, -1.8963, -1.7098, -1.0219, -1.2627, -1.3822, -1.7609, -1.2647, -1.8582, -1.7254, -1.3636, -1....9334, -1.8816, -1.4167, -1.4158, -1.0699, -1.5390, -1.6575], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype1-sum-sub-shp5-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'sub', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'sum' lhs_data = tensor([1.7103, 1.7761, 1.4541, 1.1740, 1.3146, 1.8828, 1.2236, 1.5058, 1.5811, 1.0827, 1.8687, 1.7723, 1.4835...4525, 1.4561, 1.4894, 1.1691, 1.2874, 1.6025, 1.3527, 1.9124, 1.8299], dtype=torch.float64, requires_grad=True) rhs_data = tensor([-1.6108, -1.2814, -1.6885, -1.0054, -1.6392, -1.0609, -1.5444, -1.9433, -1.1341, -1.0589, -1.1218, -1....5558, -1.3582, -1.5204, -1.2757, -1.6942, -1.9377, -1.7919], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: sub, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp0-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[[[[[1.4385], [1.7007], [1.6448]]], [[[1.3995], [1.0889], ...5]]], [[[1.8936], [1.9006], [1.1164]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.4722]], [[1.0473]], [[1.0957]]]], [[[[1.1487]], [[1.963... [[[[1.7808]], [[1.0508]], [[1.5724]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp0-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[[[[[1.5449], [1.4723], [1.6888]]], [[[1.2473], [1.4352], ...8]]], [[[1.6659], [1.4314], [1.1698]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2735]], [[1.6168]], [[1.0888]]]], [[[[1.1797]], [[1.438... [[[[1.9650]], [[1.7153]], [[1.8157]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp1-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[[1.9862, 1.2386, 1.5903], [1.8031, 1.3953, 1.6375], [1.3190, 1.2847, 1.7156]], [[1...9726], [1.1223, 1.7131, 1.4010], [1.0354, 1.8621, 1.1066]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.9512, 1.6853, 1.9368]], [[1.0624, 1.2073, 1.1803]], [[1.4618, 1.8211, 1.2640]], ...]], [[1.9426, 1.3838, 1.9163]], [[1.2791, 1.1296, 1.7781]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp1-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[[1.5819, 1.0015, 1.1361], [1.3415, 1.7774, 1.4701], [1.5070, 1.5408, 1.6602]], [[1...8217], [1.0256, 1.2239, 1.9355], [1.2985, 1.6669, 1.3882]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.8736, 1.6301, 1.3182]], [[1.1271, 1.4420, 1.1706]], [[1.7582, 1.9688, 1.2796]], ...]], [[1.9424, 1.8408, 1.5395]], [[1.8296, 1.7860, 1.1986]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp2-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[1.6026], [1.1780], [1.7234], [1.3812], [1.1804], [1.0828], [1...697], [1.7235], [1.0519], [1.6541], [1.9086]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3962, 1.8464, 1.2005], [1.2285, 1.3697, 1.0651], [1.9317, 1.3835, 1.1697], [1.4913,... 1.2771], [1.3437, 1.8098, 1.0819], [1.1743, 1.4121, 1.1606]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp2-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[1.2339], [1.5265], [1.6499], [1.5901], [1.6148], [1.2467], [1...962], [1.4892], [1.5193], [1.7435], [1.7881]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5030, 1.2868, 1.2985], [1.8815, 1.3756, 1.2761], [1.3366, 1.1614, 1.6079], [1.5954,... 1.8972], [1.5664, 1.4177, 1.8358], [1.3895, 1.8056, 1.4485]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp3-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[1.1183, 1.0680, 1.4964], [1.7711, 1.1008, 1.5546], [1.6423, 1.9512, 1.5329], [1.6648,... 1.3739], [1.2981, 1.8005, 1.1427], [1.8512, 1.1015, 1.8171]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1579], [1.1475], [1.2842], [1.2111], [1.0553], [1.4200], [1...761], [1.7249], [1.3724], [1.4161], [1.8300]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp3-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[1.5482, 1.4010, 1.9620], [1.8252, 1.9546, 1.0203], [1.0198, 1.4791, 1.0050], [1.6436,... 1.4173], [1.6547, 1.1332, 1.4478], [1.3360, 1.9226, 1.5354]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5535], [1.6841], [1.7008], [1.3310], [1.1638], [1.0454], [1...280], [1.8878], [1.6588], [1.4414], [1.7579]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp4-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[1.6683], [1.9697], [1.3946], [1.6888], [1.3422], [1.5867], [1...629], [1.7310], [1.5094], [1.9804], [1.8539]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9398], [1.4507], [1.3131], [1.8713], [1.3989], [1.7352], [1...331], [1.8321], [1.6142], [1.9013], [1.3965]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp4-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[1.8493], [1.2547], [1.4678], [1.5707], [1.1762], [1.7696], [1...160], [1.1654], [1.2978], [1.4658], [1.3484]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9784], [1.6214], [1.8322], [1.6529], [1.6067], [1.1147], [1...666], [1.0670], [1.9236], [1.9535], [1.3905]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp5-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([1.0900, 1.5596, 1.0163, 1.2366, 1.1149, 1.0651, 1.3200, 1.0131, 1.1100, 1.8606, 1.8904, 1.4584, 1.1638...4573, 1.0903, 1.2263, 1.9710, 1.5769, 1.9625, 1.7841, 1.4869, 1.3011], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.8594, 1.4158, 1.0343, 1.1527, 1.4824, 1.9724, 1.5400, 1.2552, 1.0039, 1.7558, 1.0647, 1.8020, 1.5410...9588, 1.9700, 1.7919, 1.6260, 1.6866, 1.7335, 1.4692, 1.1104, 1.6920], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-mul-shp5-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'mul', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([1.8864, 1.7254, 1.2853, 1.5635, 1.8530, 1.2006, 1.7270, 1.3487, 1.4432, 1.2469, 1.9619, 1.2554, 1.6753...2551, 1.2187, 1.3963, 1.9705, 1.2969, 1.7317, 1.8432, 1.5580, 1.2253], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.9463, 1.0393, 1.5744, 1.8254, 1.7578, 1.4840, 1.1708, 1.6098, 1.8541, 1.0537, 1.5115, 1.9501, 1.2951...3765, 1.4009, 1.4053, 1.9185, 1.4224, 1.3911, 1.7552, 1.2900, 1.4991], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: mul, reduce func: sum) ______________________ test_spmm[idtype1-sum-div-shp0-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'div', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[[[[[1.3799], [1.6150], [1.0065]]], [[[1.1020], [1.2968], ...3]]], [[[1.5334], [1.7261], [1.4070]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.5200]], [[0.6409]], [[0.8493]]]], [[[[0.6101]], [[0.509... [[[[0.7261]], [[0.5533]], [[0.6747]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: div, reduce func: sum) ______________________ test_spmm[idtype1-sum-div-shp0-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'div', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[[[[[1.4798], [1.7354], [1.9145]]], [[[1.5692], [1.6292], ...7]]], [[[1.3515], [1.7379], [1.8931]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.5726]], [[0.8688]], [[0.7166]]]], [[[[0.5324]], [[0.504... [[[[0.8489]], [[0.5195]], [[0.9099]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: div, reduce func: sum) ______________________ test_spmm[idtype1-sum-div-shp1-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'div', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[[1.8159, 1.5376, 1.7199], [1.0274, 1.3888, 1.7630], [1.5737, 1.0614, 1.1303]], [[1...3613], [1.3806, 1.6053, 1.0124], [1.3637, 1.5941, 1.8091]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[0.8715, 0.7792, 0.5211]], [[0.7082, 0.5267, 0.9934]], [[0.9772, 0.5194, 0.9411]], ... [[0.7167, 0.8200, 0.7908]], [[0.6562, 0.6958, 0.6318]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3])
SpMM(message func: div, reduce func: sum)
______________________ test_spmm[idtype1-sum-div-shp1-g1] ______________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3, 3), (1, 3)), msg = 'div', reducer = 'sum'

    @pytest.mark.parametrize('g', graphs)
    @pytest.mark.parametrize('shp', spmm_shapes)
    @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs'])
    @pytest.mark.parametrize('reducer', ['sum', 'min', 'max'])
    @parametrize_idtype
    def test_spmm(idtype, g, shp, msg, reducer):
        g = g.astype(idtype).to(F.ctx())
        print(g)
        print(g.idtype)
        hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1)
        he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1)
        print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he)))
        g.srcdata['x'] = F.attach_grad(F.clone(hu))
        g.edata['w'] = F.attach_grad(F.clone(he))
        print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer))
        u = F.attach_grad(F.clone(hu))
        e = F.attach_grad(F.clone(he))
        with F.record_grad():
>           v = gspmm(g, msg, reducer, u, e)

tests/compute/test_sparse.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[1.1268, 1.0420, 1.3624], [1.1015, 1.2097, 1.4692], [1.6067, 1.0514, 1.6071]], [[1...6788], [1.5679, 1.8355, 1.8997], [1.5042, 1.5453, 1.7477]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[0.5457, 0.9158, 0.6392]], [[0.5956, 0.6642, 0.6786]], [[0.5556, 0.6246, 0.6471]], ... [[0.7507, 0.6567, 0.6867]], [[0.5462, 0.7397, 0.7714]]], dtype=torch.float64, grad_fn=)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3])
SpMM(message func: div, reduce func: sum)
______________________ test_spmm[idtype1-sum-div-shp2-g0] ______________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)})
shp = ((1,), (3,)), msg = 'div', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[1.8442], [1.0511], [1.8672], [1.1656], [1.3264], [1.1233], [1...758], [1.8897], [1.8559], [1.8904], [1.4276]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[0.5664, 0.7647, 0.6801], [0.9580, 0.5788, 0.7333], [0.9582, 0.6912, 0.7593], [0.5909,...257], [0.8243, 0.5953, 0.6771], [0.5814, 0.5174, 0.6744]], dtype=torch.float64, grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3])
SpMM(message func: div, reduce func: sum)
______________________ test_spmm[idtype1-sum-div-shp2-g1] ______________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (3,)), msg = 'div', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[1.8763], [1.9628], [1.3925], [1.9086], [1.2764], [1.4012], [1...492], [1.2418], [1.9014], [1.8320], [1.2668]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[0.7082, 0.9146, 0.5707], [0.6167, 0.7269, 0.6892], [0.7979, 0.9655, 0.7932], [0.5022,...238], [0.6508, 0.7432, 0.7589], [0.6378, 0.8237, 0.9224]], dtype=torch.float64, grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3])
SpMM(message func: div, reduce func: sum)
______________________ test_spmm[idtype1-sum-div-shp3-g0] ______________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((3,), (1,)), msg = 'div', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[1.2943, 1.7322, 1.3901], [1.5139, 1.3192, 1.8698], [1.3034, 1.1595, 1.3942], [1.7655,... 1.3730], [1.8400, 1.6642, 1.0901], [1.4289, 1.4916, 1.4630]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[0.6401], [0.8621], [0.7813], [0.5103], [0.5189], [0.5073], [0..., [0.5073], [0.7184], [0.7099], [0.6830]], dtype=torch.float64, grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1])
SpMM(message func: div, reduce func: sum)
______________________ test_spmm[idtype1-sum-div-shp3-g1] ______________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (1,)), msg = 'div', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[1.1817, 1.7159, 1.7889], [1.3671, 1.5372, 1.2907], [1.7830, 1.9551, 1.8556], [1.0553,... 1.0780], [1.0417, 1.0749, 1.4547], [1.7992, 1.1053, 1.1828]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[0.6548], [0.7000], [0.6429], [0.7911], [0.6993], [0.8968], [0..., [0.7133], [0.5982], [0.6422], [0.5552]], dtype=torch.float64, grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1])
SpMM(message func: div, reduce func: sum)
______________________ test_spmm[idtype1-sum-div-shp4-g0] ______________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1,), (1,)), msg = 'div', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[1.9722], [1.1213], [1.1088], [1.7359], [1.8794], [1.7525], [1...988], [1.5390], [1.2658], [1.3736], [1.9788]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[0.7489], [0.6259], [0.6415], [0.6672], [0.9453], [0.6035], [0..., [0.8960], [0.5421], [0.5528], [0.7000]], dtype=torch.float64, grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1])
SpMM(message func: div, reduce func: sum)
______________________ test_spmm[idtype1-sum-div-shp4-g1] ______________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (1,)), msg = 'div', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[1.1799], [1.1100], [1.4682], [1.6186], [1.7700], [1.1562], [1...588], [1.3699], [1.3363], [1.8482], [1.8526]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[0.6047], [0.8038], [0.7028], [0.5133], [0.5064], [0.9456], [0..., [0.5768], [0.5190], [0.9973], [0.9444]], dtype=torch.float64, grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1])
SpMM(message func: div, reduce func: sum)
______________________ test_spmm[idtype1-sum-div-shp5-g0] ______________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)})
shp = ((), ()), msg = 'div', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([1.9472, 1.5994, 1.6640, 1.5543, 1.8757, 1.2315, 1.1515, 1.3723, 1.2283, 1.3012, 1.1891, 1.3534, 1.8446...4655, 1.3585, 1.3223, 1.7618, 1.0561, 1.2168, 1.7424, 1.1325, 1.3242], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([0.9605, 0.8879, 0.5772, 0.6767, 0.5919, 0.8494, 0.5639, 0.5617, 0.9977, 0.9070, 0.7286, 0.5567, 0.8567..., 0.6590, 0.6702, 0.7103, 0.5191, 0.5807, 0.6962, 0.7916, 0.5462], dtype=torch.float64, grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30]), e shape: torch.Size([100])
SpMM(message func: div, reduce func: sum)
______________________ test_spmm[idtype1-sum-div-shp5-g1] ______________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((), ()), msg = 'div', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([1.1421, 1.3980, 1.3958, 1.1674, 1.4836, 1.8906, 1.2427, 1.8017, 1.1345, 1.8977, 1.6261, 1.9619, 1.0013...9388, 1.9209, 1.7353, 1.9747, 1.4079, 1.6914, 1.6582, 1.0033, 1.5640], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([0.5378, 0.5013, 0.7749, 0.9032, 0.9497, 0.8555, 0.5447, 0.5307, 0.5015, 0.5926, 0.8675, 0.6255, 0.8815..., 0.5427, 0.7440, 0.8837, 0.6241, 0.6629, 0.5095, 0.9686, 0.9230], dtype=torch.float64, grad_fn=)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30]), e shape: torch.Size([300])
SpMM(message func: div, reduce func: sum)
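Every failure in this run is the same crash: gspmm never reaches the SpMM kernel, because `with autocast(enabled=False):` at python/dgl/backend/pytorch/sparse.py:720 raises `TypeError: empty_context() got an unexpected keyword argument 'enabled'`. The message indicates that on this PyTorch build the `autocast` name is bound to a zero-argument no-op helper called `empty_context` rather than to PyTorch's real autocast, so passing `enabled=False` fails at the call. A standalone sketch that reproduces the exact message (the `empty_context` below is modeled on the name in the traceback, not copied from DGL's sources):

    from contextlib import contextmanager

    @contextmanager
    def empty_context():      # zero-argument stub, as the error message implies
        yield

    try:
        with empty_context(enabled=False):   # mirrors `with autocast(enabled=False):`
            pass
    except TypeError as err:
        print(err)  # empty_context() got an unexpected keyword argument 'enabled'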
___________________ test_spmm[idtype1-sum-copy_lhs-shp0-g0] ____________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[[[[[1.9656], [1.1263], [1.8371]]], [[[1.8688], [1.0640], ...5]]], [[[1.8659], [1.8259], [1.5949]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.4279]], [[1.7153]], [[1.5640]]]], [[[[1.3120]], [[1.512... [[[[1.3728]], [[1.7397]], [[1.4268]]]]]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1])
SpMM(message func: copy_lhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_lhs-shp0-g1] ____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[[[[[1.2846], [1.5743], [1.9885]]], [[[1.7539], [1.7718], ...8]]], [[[1.5873], [1.8830], [1.9861]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.1291]], [[1.9296]], [[1.6768]]]], [[[[1.0048]], [[1.306... [[[[1.9846]], [[1.4790]], [[1.0806]]]]]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1])
SpMM(message func: copy_lhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_lhs-shp1-g0] ____________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)})
shp = ((3, 3), (1, 3)), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[[1.6395, 1.6252, 1.8576], [1.1737, 1.5628, 1.1700], [1.8471, 1.2822, 1.6228]], [[1...4093], [1.1318, 1.4383, 1.3144], [1.8556, 1.2756, 1.2838]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[1.4989, 1.5987, 1.1602]], [[1.5305, 1.8715, 1.6367]], [[1.7419, 1.1818, 1.8809]], ...]], [[1.3107, 1.9559, 1.3080]], [[1.8068, 1.6875, 1.7043]]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3])
SpMM(message func: copy_lhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_lhs-shp1-g1] ____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3, 3), (1, 3)), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[[1.5425, 1.0112, 1.3100], [1.5294, 1.7102, 1.8570], [1.5733, 1.4406, 1.8329]], [[1...7502], [1.0756, 1.0151, 1.0475], [1.1834, 1.5998, 1.8751]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[1.9234, 1.1191, 1.7297]], [[1.9583, 1.7130, 1.4492]], [[1.4947, 1.1560, 1.5657]], ...]], [[1.7480, 1.7266, 1.9032]], [[1.6557, 1.2166, 1.3794]]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3])
SpMM(message func: copy_lhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_lhs-shp2-g0] ____________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)})
shp = ((1,), (3,)), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[1.2849], [1.9471], [1.2280], [1.0398], [1.1139], [1.2108], [1...637], [1.1524], [1.0120], [1.3354], [1.2541]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.2137, 1.6744, 1.4004], [1.8149, 1.0704, 1.9954], [1.0102, 1.0363, 1.4953], [1.0253,... 1.1827], [1.4201, 1.3642, 1.9882], [1.2645, 1.3888, 1.6087]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3])
SpMM(message func: copy_lhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_lhs-shp2-g1] ____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (3,)), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[1.2235], [1.4667], [1.1582], [1.0059], [1.5625], [1.8636], [1...172], [1.8308], [1.4035], [1.6593], [1.7022]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.5205, 1.1763, 1.6230], [1.2805, 1.9518, 1.5077], [1.3653, 1.6416, 1.3277], [1.1121,... 1.8070], [1.8039, 1.2296, 1.9037], [1.0199, 1.8401, 1.9346]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3])
SpMM(message func: copy_lhs, reduce func: sum)
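One way to make the failing call site tolerate both old and new PyTorch is to let the no-op stub swallow the keyword arguments the real autocast accepts. A minimal sketch, assuming the stub is only needed when torch.cuda.amp.autocast cannot be imported; the import guard and names here are illustrative, not DGL's actual fix:

    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):
        # Accept and ignore anything the real autocast would take
        # (e.g. enabled=False), then run the body unchanged.
        yield

    try:
        from torch.cuda.amp import autocast
    except ImportError:       # PyTorch builds without AMP autocast
        autocast = empty_context

With this stub, `with autocast(enabled=False):` degrades to a plain no-op context on builds where mixed precision is unavailable, instead of raising the TypeError seen above.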
___________________ test_spmm[idtype1-sum-copy_lhs-shp3-g0] ____________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((3,), (1,)), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[1.2030, 1.1628, 1.4151], [1.0991, 1.3831, 1.4098], [1.8092, 1.5743, 1.1144], [1.2254,... 1.4681], [1.9398, 1.9435, 1.8548], [1.2849, 1.0713, 1.2894]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.8526], [1.0118], [1.7216], [1.4453], [1.8715], [1.6908], [1...989], [1.9659], [1.5678], [1.9465], [1.1170]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1])
SpMM(message func: copy_lhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_lhs-shp3-g1] ____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (1,)), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[1.2375, 1.6829, 1.7505], [1.6399, 1.0153, 1.7637], [1.8919, 1.0466, 1.7417], [1.7962,... 1.6201], [1.3385, 1.8322, 1.3304], [1.6596, 1.8411, 1.1979]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.0040], [1.2383], [1.1180], [1.8387], [1.4332], [1.9495], [1...847], [1.5549], [1.6257], [1.5654], [1.8516]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1])
SpMM(message func: copy_lhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_lhs-shp4-g0] ____________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1,), (1,)), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[1.8999], [1.4458], [1.7357], [1.3922], [1.9630], [1.8944], [1...887], [1.4317], [1.7635], [1.1341], [1.7529]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.9710], [1.2617], [1.2387], [1.4737], [1.2326], [1.1427], [1...347], [1.6674], [1.5093], [1.6918], [1.8887]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1])
SpMM(message func: copy_lhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_lhs-shp4-g1] ____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (1,)), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[1.5662], [1.6983], [1.8147], [1.3186], [1.7071], [1.7492], [1...148], [1.8578], [1.7017], [1.0520], [1.0366]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.8457], [1.1255], [1.6835], [1.1320], [1.7651], [1.1515], [1...919], [1.1819], [1.6263], [1.3923], [1.2164]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1])
SpMM(message func: copy_lhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_lhs-shp5-g0] ____________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)})
shp = ((), ()), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([1.4397, 1.5967, 1.8072, 1.0524, 1.7289, 1.5063, 1.1009, 1.4451, 1.9126, 1.1190, 1.6235, 1.2619, 1.1558...4384, 1.9773, 1.2082, 1.2912, 1.4578, 1.1719, 1.4640, 1.7758, 1.6153], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([1.7867, 1.6823, 1.0589, 1.7804, 1.2350, 1.1154, 1.1219, 1.7200, 1.6248, 1.9741, 1.4602, 1.3868, 1.9751...4220, 1.9897, 1.6066, 1.8044, 1.7518, 1.5158, 1.9954, 1.8651, 1.4057], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30]), e shape: torch.Size([100])
SpMM(message func: copy_lhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_lhs-shp5-g1] ____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((), ()), msg = 'copy_lhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([1.1732, 1.6152, 1.9398, 1.4631, 1.9603, 1.8152, 1.9534, 1.1104, 1.7235, 1.2067, 1.9356, 1.3969, 1.7886...3025, 1.0327, 1.2162, 1.6816, 1.9424, 1.2937, 1.4508, 1.3610, 1.8060], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([1.5405, 1.2184, 1.8584, 1.0676, 1.8614, 1.8849, 1.9240, 1.5430, 1.0997, 1.9175, 1.0570, 1.2426, 1.3183...1589, 1.3351, 1.3697, 1.4507, 1.6558, 1.0853, 1.7856, 1.9017, 1.9853], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30]), e shape: torch.Size([300])
SpMM(message func: copy_lhs, reduce func: sum)
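Aside from the crash itself, the gspmm wrapper quoted in each traceback explains why op is reported as 'mul' in the 'div' failures: 'sub' and 'div' are rewritten into 'add' and 'mul' on a negated or reciprocal right operand before dispatch, so the backend only needs add/mul kernels. The identities it relies on, checked with plain tensors (a standalone sketch; the names u and e merely stand in for the node and edge features from the tests):

    import torch

    u = torch.rand(5, 3) + 1   # stand-in node features
    e = torch.rand(5, 3) + 1   # stand-in edge features

    assert torch.allclose(u - e, u + (-e))      # 'sub' -> 'add' on -e
    assert torch.allclose(u / e, u * (1. / e))  # 'div' -> 'mul' on 1/e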
___________________ test_spmm[idtype1-sum-copy_rhs-shp0-g0] ____________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_rhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_rhs', reduce_op = 'sum'
lhs_data = tensor([[[[[[1.7501], [1.5715], [1.9112]]], [[[1.7885], [1.5950], ...6]]], [[[1.5544], [1.8755], [1.0282]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.7314]], [[1.0892]], [[1.3785]]]], [[[[1.7192]], [[1.479... [[[[1.0676]], [[1.1762]], [[1.1476]]]]]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_rhs-shp0-g1] ____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_rhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_rhs', reduce_op = 'sum'
lhs_data = tensor([[[[[[1.6862], [1.6579], [1.9593]]], [[[1.8568], [1.8319], ...7]]], [[[1.3825], [1.1748], [1.7879]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.2593]], [[1.1093]], [[1.8061]]]], [[[[1.7069]], [[1.029... [[[[1.3702]], [[1.3992]], [[1.2459]]]]]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_rhs-shp1-g0] ____________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)})
shp = ((3, 3), (1, 3)), msg = 'copy_rhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_rhs', reduce_op = 'sum'
lhs_data = tensor([[[1.1500, 1.3343, 1.7756], [1.8189, 1.8814, 1.4228], [1.5445, 1.2928, 1.2122]], [[1...4407], [1.9386, 1.6416, 1.6561], [1.7920, 1.1707, 1.0474]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[1.0360, 1.0357, 1.3306]], [[1.1459, 1.7509, 1.3289]], [[1.2994, 1.4550, 1.1867]], ...]], [[1.0866, 1.9236, 1.3418]], [[1.0353, 1.1426, 1.9460]]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_rhs-shp1-g1] ____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3, 3), (1, 3)), msg = 'copy_rhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_rhs', reduce_op = 'sum'
lhs_data = tensor([[[1.2631, 1.9168, 1.5887], [1.1123, 1.7291, 1.8944], [1.1037, 1.7952, 1.1625]], [[1...3452], [1.0557, 1.7501, 1.5561], [1.5298, 1.3565, 1.3975]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[1.0540, 1.4496, 1.1693]], [[1.5534, 1.8674, 1.1064]], [[1.0240, 1.8367, 1.9743]], ...]], [[1.3109, 1.4165, 1.1420]], [[1.4634, 1.8744, 1.3742]]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_rhs-shp2-g0] ____________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)})
shp = ((1,), (3,)), msg = 'copy_rhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_rhs', reduce_op = 'sum'
lhs_data = tensor([[1.1089], [1.4120], [1.1556], [1.3201], [1.8525], [1.1020], [1...242], [1.5511], [1.9383], [1.7984], [1.0562]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.9280, 1.5896, 1.0235], [1.8002, 1.5186, 1.9182], [1.8257, 1.7104, 1.4510], [1.5118,... 1.8043], [1.2972, 1.0286, 1.3006], [1.5840, 1.6892, 1.0126]], dtype=torch.float64, requires_grad=True)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3])
SpMM(message func: copy_rhs, reduce func: sum)
___________________ test_spmm[idtype1-sum-copy_rhs-shp2-g1] ____________________

idtype = torch.int64
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (3,)), msg = 'copy_rhs', reducer = 'sum'

tests/compute/test_sparse.py:120: in test_spmm
python/dgl/ops/spmm.py:77: in gspmm
gidx =
op = 'copy_rhs', reduce_op = 'sum'
lhs_data = tensor([[1.4957], [1.7674], [1.2941], [1.7876], [1.3636], [1.0150], [1...099], [1.4470], [1.8882], [1.8910], [1.0219]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.6880, 1.1646, 1.9590], [1.4536, 1.5842, 1.8182], [1.4575, 1.9571, 1.0425], [1.5290,... 1.1937], [1.6935, 1.4754, 1.2056], [1.7008, 1.5021, 1.2136]], dtype=torch.float64, requires_grad=True)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: copy_rhs, reduce func: sum) ___________________ test_spmm[idtype1-sum-copy_rhs-shp3-g0] ____________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'copy_rhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum' lhs_data = tensor([[1.6090, 1.0799, 1.4881], [1.1757, 1.2642, 1.7783], [1.3282, 1.6448, 2.0000], [1.8021,... 1.9838], [1.4876, 1.1194, 1.3461], [1.9431, 1.5817, 1.3629]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1644], [1.4646], [1.4633], [1.8686], [1.3885], [1.3249], [1...990], [1.5472], [1.9084], [1.6722], [1.6511]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: copy_rhs, reduce func: sum) ___________________ test_spmm[idtype1-sum-copy_rhs-shp3-g1] ____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'copy_rhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum' lhs_data = tensor([[1.9546, 1.6114, 1.1086], [1.8216, 1.1505, 1.9370], [1.7012, 1.0445, 1.7861], [1.4284,... 1.5568], [1.0342, 1.2805, 1.2094], [1.4141, 1.6782, 1.1892]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.2427], [1.8736], [1.5317], [1.7974], [1.3775], [1.6441], [1...222], [1.1379], [1.2171], [1.9604], [1.7835]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: copy_rhs, reduce func: sum) ___________________ test_spmm[idtype1-sum-copy_rhs-shp4-g0] ____________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'copy_rhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum' lhs_data = tensor([[1.2293], [1.1290], [1.2241], [1.8260], [1.0473], [1.2407], [1...062], [1.4298], [1.6843], [1.6183], [1.3251]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.0229], [1.3506], [1.2539], [1.0700], [1.1630], [1.9627], [1...091], [1.1501], [1.0034], [1.8029], [1.0454]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: copy_rhs, reduce func: sum) ___________________ test_spmm[idtype1-sum-copy_rhs-shp4-g1] ____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'copy_rhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum' lhs_data = tensor([[1.6017], [1.2612], [1.4638], [1.4273], [1.1989], [1.5673], [1...083], [1.0274], [1.4967], [1.3953], [1.2881]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.2453], [1.4729], [1.0863], [1.6248], [1.7602], [1.0603], [1...914], [1.1655], [1.7968], [1.8463], [1.0369]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: copy_rhs, reduce func: sum) ___________________ test_spmm[idtype1-sum-copy_rhs-shp5-g0] ____________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'copy_rhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum' lhs_data = tensor([1.8845, 1.7952, 1.2221, 1.7357, 1.1579, 1.8971, 1.4628, 1.7156, 1.9189, 1.7702, 1.6391, 1.5604, 1.0444...3942, 1.7537, 1.9665, 1.7342, 1.9722, 1.8343, 1.2691, 1.8725, 1.4181], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.5586, 1.4287, 1.2979, 1.7531, 1.2304, 1.3777, 1.8594, 1.9815, 1.4219, 1.2679, 1.4820, 1.4887, 1.9938...7738, 1.3959, 1.6758, 1.6481, 1.6847, 1.2095, 1.4866, 1.6150, 1.0682], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: copy_rhs, reduce func: sum) ___________________ test_spmm[idtype1-sum-copy_rhs-shp5-g1] ____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'copy_rhs', reducer = 'sum' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs', reduce_op = 'sum' lhs_data = tensor([1.3158, 1.1246, 1.5951, 1.4251, 1.0783, 1.5706, 1.3217, 1.3423, 1.3151, 1.4528, 1.4928, 1.6552, 1.4010...8836, 1.1844, 1.1143, 1.3958, 1.4157, 1.6525, 1.8381, 1.7564, 1.0059], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.8282, 1.0095, 1.6740, 1.9940, 1.2973, 1.4253, 1.3036, 1.8156, 1.0124, 1.5477, 1.4778, 1.6547, 1.8698...1566, 1.1537, 1.0246, 1.2479, 1.6438, 1.7058, 1.6035, 1.9154, 1.8042], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: copy_rhs, reduce func: sum) ______________________ test_spmm[idtype1-min-add-shp0-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[[[[[1.7863], [1.9733], [1.2908]]], [[[1.8563], [1.4619], ...4]]], [[[1.5360], [1.2621], [1.9066]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.3431]], [[1.7339]], [[1.1363]]]], [[[[1.1050]], [[1.668... [[[[1.7111]], [[1.8518]], [[1.7723]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
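The `args = _cast_if_autocast_enabled(...)` call that precedes the failing line in every traceback follows a common mixed-precision idiom: pre-cast the tensor arguments to the active autocast dtype, then run the custom op inside `autocast(enabled=False)` so its autograd Function sees uniform dtypes. A sketch of that idiom, with the body reconstructed for illustration rather than taken from DGL:

    import torch

    def _cast_if_autocast_enabled(*args):
        # Outside an autocast region there is nothing to cast.
        if not torch.is_autocast_enabled():
            return args
        # `get_autocast_gpu_dtype` only exists on newer PyTorch releases,
        # so fall back to float16, autocast's historical default.
        dtype = (torch.get_autocast_gpu_dtype()
                 if hasattr(torch, "get_autocast_gpu_dtype")
                 else torch.float16)
        return tuple(
            a.to(dtype) if torch.is_tensor(a) and a.is_floating_point() else a
            for a in args)

Disabling autocast around the subsequent kernel launch is precisely the step that trips over the keyword-less `empty_context` in these runs.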
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-add-shp0-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[[[[[1.4596], [1.6091], [1.0703]]], [[[1.2706], [1.5354], ...4]]], [[[1.8847], [1.7672], [1.1616]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.9535]], [[1.4220]], [[1.4534]]]], [[[[1.8113]], [[1.921... [[[[1.6969]], [[1.3244]], [[1.8695]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-add-shp1-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[[1.1305, 1.3726, 1.7812], [1.0212, 1.0640, 1.8234], [1.3354, 1.4905, 1.4655]], [[1...7009], [1.8148, 1.6987, 1.2123], [1.2292, 1.9980, 1.9290]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.8286, 1.7842, 1.1345]], [[1.4894, 1.6708, 1.3640]], [[1.8371, 1.1287, 1.2263]], ...]], [[1.0952, 1.8119, 1.0144]], [[1.8524, 1.0381, 1.4766]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-add-shp1-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[[1.3584, 1.6938, 1.1930], [1.5138, 1.7168, 1.3783], [1.7482, 1.2080, 1.8110]], [[1...3464], [1.2629, 1.9246, 1.9509], [1.7930, 1.5335, 1.4229]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.9488, 1.1751, 1.9396]], [[1.9080, 1.8904, 1.9726]], [[1.9493, 1.7334, 1.0078]], ...]], [[1.8341, 1.2008, 1.5922]], [[1.8461, 1.3156, 1.0577]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-add-shp2-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.2949], [1.1805], [1.3332], [1.6845], [1.5164], [1.2397], [1...785], [1.9539], [1.7731], [1.3985], [1.0774]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7068, 1.3119, 1.6463], [1.3915, 1.2731, 1.3493], [1.9395, 1.5912, 1.3531], [1.5068,... 1.8494], [1.3636, 1.5990, 1.3398], [1.5070, 1.6512, 1.1571]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
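For orientation, the operation test_spmm exercises: gspmm builds a message from the source-node feature and the edge feature on every edge, then reduces the incoming messages at each destination node; the parametrized shp cases above check NumPy-style broadcasting between the two feature shapes. A dense reference sketch of the 'sum' reducer (src, dst, and num_dst are hypothetical inputs for illustration, not the test's API):

    import torch

    def spmm_reference(src, dst, u, e, msg, num_dst):
        # Per-edge messages; node features broadcast against edge features.
        m = {'add': u[src] + e, 'mul': u[src] * e,
             'copy_lhs': u[src], 'copy_rhs': e}[msg]
        out = torch.zeros((num_dst,) + m.shape[1:], dtype=m.dtype)
        for k in range(dst.numel()):   # 'sum' reducer; min/max are analogous
            out[dst[k]] += m[k]
        return out

With u of shape (30, 3, 3) and e of shape (100, 1, 3), as in the shp1 cases above, each per-edge message broadcasts to shape (3, 3).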
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-add-shp2-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.2428], [1.0831], [1.0862], [1.0935], [1.9921], [1.4539], [1...339], [1.4943], [1.2691], [1.6382], [1.3724]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9813, 1.6664, 1.9832], [1.2013, 1.2083, 1.1618], [1.4904, 1.2059, 1.3680], [1.4474,... 1.8487], [1.1080, 1.8423, 1.4414], [1.6413, 1.9538, 1.5092]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-add-shp3-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.2799, 1.8299, 1.4178], [1.2259, 1.4449, 1.5913], [1.8020, 1.8521, 1.9351], [1.3948,... 1.2391], [1.6026, 1.1393, 1.0473], [1.7296, 1.6818, 1.4620]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5353], [1.7216], [1.5549], [1.6380], [1.4469], [1.3115], [1...266], [1.0584], [1.3803], [1.7577], [1.4740]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-add-shp3-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.4582, 1.9538, 1.9882], [1.1622, 1.4435, 1.5964], [1.3660, 1.6947, 1.4631], [1.1534,... 1.2735], [1.5745, 1.0938, 1.3485], [1.3410, 1.0639, 1.5164]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6643], [1.7597], [1.8893], [1.7508], [1.0303], [1.2661], [1...244], [1.7019], [1.5782], [1.2326], [1.5855]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-add-shp4-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.5603], [1.2375], [1.7112], [1.0302], [1.0684], [1.4512], [1...580], [1.4283], [1.2597], [1.8296], [1.8174]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9682], [1.8133], [1.4432], [1.1159], [1.1753], [1.9987], [1...681], [1.9563], [1.9551], [1.8359], [1.1028]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-add-shp4-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.8077], [1.4855], [1.4417], [1.4461], [1.3657], [1.1106], [1...838], [1.9480], [1.6002], [1.3375], [1.8298]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8167], [1.6060], [1.0002], [1.5336], [1.6964], [1.4879], [1...632], [1.6280], [1.6335], [1.7881], [1.6955]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-add-shp5-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([1.9882, 1.3705, 1.9824, 1.2338, 1.4456, 1.6783, 1.7172, 1.3214, 1.1105, 1.1770, 1.9605, 1.6384, 1.8654...2338, 1.2762, 1.8317, 1.8304, 1.8710, 1.2585, 1.4213, 1.8213, 1.9976], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.3744, 1.2163, 1.8413, 1.6030, 1.8236, 1.0125, 1.6684, 1.8377, 1.1318, 1.5802, 1.1978, 1.9531, 1.2199...3969, 1.5985, 1.9951, 1.9629, 1.5330, 1.2488, 1.2144, 1.5143, 1.8740], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-add-shp5-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'add', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([1.4976, 1.8731, 1.4528, 1.7531, 1.9699, 1.5962, 1.3387, 1.7555, 1.4868, 1.0324, 1.3024, 1.1484, 1.8398...3719, 1.1070, 1.2897, 1.7213, 1.1287, 1.8756, 1.2568, 1.4970, 1.6760], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.8489, 1.6336, 1.0979, 1.1728, 1.7234, 1.3679, 1.8790, 1.5064, 1.4108, 1.2337, 1.6397, 1.2184, 1.8209...2060, 1.0992, 1.0982, 1.8601, 1.8337, 1.1286, 1.1324, 1.0777, 1.5493], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: add, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp0-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[[[[[1.0954], [1.1833], [1.5109]]], [[[1.1341], [1.2921], ...2]]], [[[1.6139], [1.1328], [1.7618]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.6133]], [[-1.8201]], [[-1.2430]]]], [[[[-1.0244]], [[-... [[[[-1.7729]], [[-1.7789]], [[-1.4292]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
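Note that the msg='sub' traces report op='add' and a negated rhs_data (hence the grad_fn on that tensor): gspmm canonicalises the operator before dispatching, exactly as the snippet repeated in every traceback shows. Restated on its own:

    def canonicalise(op, rhs_data):
        if op == 'sub':            # u - e  ==  u + (-e)
            return 'add', -rhs_data
        if op == 'div':            # u / e  ==  u * (1 / e)
            return 'mul', 1. / rhs_data
        return op, rhs_data

Only 'add', 'mul', and the 'copy_*' ops therefore need dedicated kernels; subtraction and division ride on negation and reciprocal of the edge operand.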
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp0-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[[[[[1.3503], [1.5213], [1.7971]]], [[[1.5618], [1.1607], ...9]]], [[[1.6969], [1.0352], [1.3095]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.1252]], [[-1.8176]], [[-1.8519]]]], [[[[-1.2481]], [[-... [[[[-1.7766]], [[-1.6847]], [[-1.4375]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp1-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[[1.2520, 1.2700, 1.6748], [1.5990, 1.6844, 1.6038], [1.6880, 1.8502, 1.9459]], [[1...4010], [1.8764, 1.5912, 1.7242], [1.7740, 1.8834, 1.0525]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[-1.2534, -1.3245, -1.6296]], [[-1.0119, -1.6818, -1.4600]], [[-1.4929, -1.2280, -1.8037]],...1.0317, -1.3838, -1.4002]], [[-1.5361, -1.1120, -1.1419]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp1-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[[1.1037, 1.4217, 1.7989], [1.8899, 1.3542, 1.0560], [1.3775, 1.0741, 1.5777]], [[1...2072], [1.2889, 1.9613, 1.1899], [1.7151, 1.3571, 1.0588]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[-1.9220, -1.1152, -1.9870]], [[-1.4924, -1.3643, -1.2723]], [[-1.5560, -1.4278, -1.8244]],...1.9944, -1.3685, -1.0937]], [[-1.6366, -1.0992, -1.7066]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp2-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.8432], [1.7917], [1.0293], [1.6452], [1.1834], [1.4613], [1...152], [1.0864], [1.1842], [1.4948], [1.2876]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.7504, -1.7719, -1.8410], [-1.5272, -1.4582, -1.0290], [-1.3695, -1.8879, -1.2959], ... [-1.4170, -1.5573, -1.7634], [-1.4727, -1.0040, -1.3776]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp2-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.6009], [1.7284], [1.5734], [1.8791], [1.7574], [1.0237], [1...833], [1.0924], [1.1716], [1.5934], [1.0635]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.9924, -1.8616, -1.9345], [-1.7263, -1.2404, -1.2162], [-1.8577, -1.2826, -1.3843], ... [-1.0400, -1.8275, -1.6731], [-1.3129, -1.1464, -1.0631]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp3-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.9066, 1.3944, 1.3750], [1.4626, 1.4326, 1.2302], [1.6360, 1.3427, 1.3476], [1.2541,... 1.0103], [1.7177, 1.8876, 1.8550], [1.2694, 1.9259, 1.6674]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.9370], [-1.7052], [-1.6200], [-1.5079], [-1.4864], [-1.8032], ... [-1.5731], [-1.3560], [-1.9193], [-1.2528]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp3-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.0327, 1.4269, 1.8122], [1.8974, 1.7307, 1.9873], [1.7264, 1.1628, 1.3787], [1.2209,... 1.4145], [1.8271, 1.8953, 1.1488], [1.3180, 1.2232, 1.5065]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.4551], [-1.7746], [-1.2859], [-1.0365], [-1.5731], [-1.2606], ... [-1.2583], [-1.0579], [-1.5285], [-1.7018]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp4-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.8778], [1.7210], [1.3519], [1.3545], [1.8002], [1.2613], [1...530], [1.3335], [1.0403], [1.6029], [1.5227]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.9884], [-1.7386], [-1.8216], [-1.0185], [-1.6740], [-1.4735], ... [-1.1330], [-1.1535], [-1.0121], [-1.7835]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp4-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([[1.9986], [1.8125], [1.3348], [1.7963], [1.1611], [1.7911], [1...939], [1.4351], [1.1083], [1.2284], [1.3573]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.4144], [-1.0371], [-1.1180], [-1.3692], [-1.5780], [-1.9155], ... [-1.1991], [-1.8910], [-1.0555], [-1.0572]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp5-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([1.1461, 1.1765, 1.2557, 1.6465, 1.4332, 1.7700, 1.4815, 1.3497, 1.5859, 1.5385, 1.1634, 1.9201, 1.4048...7537, 1.2347, 1.3305, 1.6345, 1.3430, 1.4267, 1.8037, 1.6086, 1.8527], dtype=torch.float64, requires_grad=True) rhs_data = tensor([-1.9981, -1.9564, -1.6551, -1.6568, -1.8752, -1.4625, -1.9855, -1.5691, -1.8962, -1.7212, -1.6127, -1....9247, -1.2235, -1.1584, -1.6070, -1.8529, -1.7784, -1.1181], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-sub-shp5-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'sub', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'min' lhs_data = tensor([1.7750, 1.9395, 1.1849, 1.6844, 1.3380, 1.6133, 1.9441, 1.6864, 1.0906, 1.9165, 1.1398, 1.0766, 1.9080...7926, 1.0838, 1.0041, 1.3682, 1.3248, 1.4277, 1.0329, 1.6358, 1.1179], dtype=torch.float64, requires_grad=True) rhs_data = tensor([-1.1747, -1.8830, -1.1794, -1.8805, -1.6203, -1.2063, -1.4479, -1.3211, -1.3823, -1.0206, -1.2670, -1....7043, -1.5902, -1.3293, -1.5698, -1.2277, -1.9291, -1.9564], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: sub, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp0-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[[[[1.5028], [1.1395], [1.5477]]], [[[1.5039], [1.7530], ...0]]], [[[1.5772], [1.0942], [1.8468]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.9255]], [[1.2015]], [[1.2636]]]], [[[[1.8007]], [[1.659... [[[[1.2962]], [[1.1973]], [[1.7227]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp0-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[[[[1.7696], [1.5711], [1.8251]]], [[[1.7042], [1.4652], ...5]]], [[[1.7939], [1.2731], [1.4290]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.1830]], [[1.9736]], [[1.0368]]]], [[[[1.8312]], [[1.312... [[[[1.2439]], [[1.7455]], [[1.2490]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp1-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[1.1308, 1.6524, 1.4822], [1.9385, 1.1816, 1.8033], [1.2791, 1.3926, 1.7491]], [[1...3653], [1.4078, 1.8104, 1.5847], [1.9114, 1.4403, 1.1345]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.5971, 1.5217, 1.3460]], [[1.0293, 1.8709, 1.9116]], [[1.4942, 1.9856, 1.3570]], ...]], [[1.5107, 1.0673, 1.4484]], [[1.9973, 1.3712, 1.6643]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp1-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[1.6581, 1.9723, 1.9719], [1.2104, 1.1670, 1.3672], [1.6064, 1.9390, 1.8737]], [[1...5840], [1.7983, 1.8366, 1.0651], [1.9240, 1.3286, 1.9550]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.5891, 1.0498, 1.3713]], [[1.6269, 1.8040, 1.3283]], [[1.4566, 1.3452, 1.3013]], ...]], [[1.4412, 1.6571, 1.5188]], [[1.4223, 1.2200, 1.5114]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp2-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.2910], [1.9446], [1.0622], [1.2689], [1.5834], [1.0667], [1...937], [1.9719], [1.0118], [1.7459], [1.9834]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3767, 1.5692, 1.8267], [1.5609, 1.2438, 1.3664], [1.6017, 1.1091, 1.9430], [1.1032,... 1.0301], [1.0407, 1.7588, 1.3801], [1.1191, 1.8753, 1.7560]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp2-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.2758], [1.7173], [1.5135], [1.0006], [1.5529], [1.0452], [1...703], [1.8801], [1.9652], [1.3973], [1.5315]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9544, 1.8540, 1.0481], [1.9131, 1.8163, 1.2943], [1.3622, 1.5772, 1.5474], [1.1681,... 1.6377], [1.3226, 1.0227, 1.2294], [1.7265, 1.7555, 1.5638]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp3-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.3848, 1.6472, 1.4687], [1.9921, 1.9838, 1.7610], [1.3150, 1.1022, 1.5023], [1.8276,... 1.0122], [1.6329, 1.0970, 1.7823], [1.2344, 1.7628, 1.7074]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7315], [1.8633], [1.4294], [1.1130], [1.1697], [1.8808], [1...653], [1.7885], [1.6456], [1.1009], [1.2342]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp3-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.1959, 1.1775, 1.1123], [1.4725, 1.0311, 1.1489], [1.4850, 1.1660, 1.1101], [1.6711,... 1.8470], [1.5391, 1.9659, 1.4608], [1.2370, 1.2580, 1.2714]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.0309], [1.8841], [1.1403], [1.0350], [1.8449], [1.0440], [1...235], [1.5192], [1.5290], [1.4279], [1.7869]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp4-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.3519], [1.3939], [1.6735], [1.2460], [1.9999], [1.8093], [1...551], [1.5808], [1.0612], [1.2360], [1.6507]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1287], [1.6548], [1.6422], [1.9908], [1.9935], [1.8444], [1...851], [1.1661], [1.9893], [1.8229], [1.7888]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp4-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[1.1065], [1.1075], [1.1215], [1.1859], [1.2554], [1.3146], [1...994], [1.4488], [1.6995], [1.6901], [1.4876]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6798], [1.8731], [1.0956], [1.6834], [1.1068], [1.1642], [1...998], [1.9732], [1.8178], [1.2184], [1.3875]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp5-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([1.6063, 1.5314, 1.0479, 1.9152, 1.8522, 1.3913, 1.7159, 1.5087, 1.8078, 1.8172, 1.8817, 1.4343, 1.6147...1098, 1.1022, 1.1622, 1.2310, 1.3203, 1.8717, 1.3150, 1.7530, 1.4576], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.8835, 1.1938, 1.0169, 1.6819, 1.4608, 1.9024, 1.6934, 1.9831, 1.9746, 1.1744, 1.8635, 1.4116, 1.4612...2194, 1.7818, 1.6285, 1.5893, 1.7531, 1.0942, 1.8572, 1.7827, 1.7678], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-mul-shp5-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'mul', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([1.5190, 1.1598, 1.2749, 1.9526, 1.3789, 1.6662, 1.2876, 1.9316, 1.2347, 1.6927, 1.2405, 1.4224, 1.9774...8699, 1.6683, 1.1561, 1.4240, 1.0873, 1.9789, 1.4701, 1.7918, 1.3935], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.2095, 1.8841, 1.5353, 1.9424, 1.9198, 1.9943, 1.7078, 1.0867, 1.5494, 1.5239, 1.6607, 1.1116, 1.6259...6471, 1.4871, 1.9793, 1.2255, 1.6120, 1.4764, 1.7329, 1.9199, 1.4729], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: mul, reduce func: min) ______________________ test_spmm[idtype1-min-div-shp0-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[[[[1.5782], [1.4698], [1.4654]]], [[[1.9503], [1.2268], ...0]]], [[[1.2978], [1.9345], [1.8658]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.5076]], [[0.6682]], [[0.5465]]]], [[[[0.5043]], [[0.673... [[[[0.5025]], [[0.8602]], [[0.6039]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: div, reduce func: min) ______________________ test_spmm[idtype1-min-div-shp0-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'div', reducer = 'min' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'min' lhs_data = tensor([[[[[[1.1794], [1.1806], [1.0298]]], [[[1.2813], [1.4987], ...2]]], [[[1.6214], [1.6495], [1.8810]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.9996]], [[0.6779]], [[0.5509]]]], [[[[0.6312]], [[0.523... [[[[0.5449]], [[0.6950]], [[0.8257]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
______________________ test_spmm[idtype1-min-div-shp0-g1] ______________________
idtype = torch.int64, shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'div', reducer = 'min'
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1])
    [test body and traceback identical in form to test_spmm[idtype1-min-div-shp0-g0] above; only the random tensor values differ]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype1-min-div-shp1-g0] ______________________
idtype = torch.int64, shp = ((3, 3), (1, 3)), msg = 'div', reducer = 'min'
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)})
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype1-min-div-shp1-g1] ______________________
idtype = torch.int64, shp = ((3, 3), (1, 3)), msg = 'div', reducer = 'min'
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype1-min-div-shp2-g0] ______________________
idtype = torch.int64, shp = ((1,), (3,)), msg = 'div', reducer = 'min'
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)})
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype1-min-div-shp2-g1] ______________________
idtype = torch.int64, shp = ((1,), (3,)), msg = 'div', reducer = 'min'
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype1-min-div-shp3-g0] ______________________
idtype = torch.int64, shp = ((3,), (1,)), msg = 'div', reducer = 'min'
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype1-min-div-shp3-g1] ______________________
idtype = torch.int64, shp = ((3,), (1,)), msg = 'div', reducer = 'min'
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype1-min-div-shp4-g0] ______________________
idtype = torch.int64, shp = ((1,), (1,)), msg = 'div', reducer = 'min'
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype1-min-div-shp4-g1] ______________________
idtype = torch.int64, shp = ((1,), (1,)), msg = 'div', reducer = 'min'
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

______________________ test_spmm[idtype1-min-div-shp5-g0] ______________________
idtype = torch.int64, shp = ((), ()), msg = 'div', reducer = 'min'
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)})
u shape: torch.Size([30]), e shape: torch.Size([100])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
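Note that every 'div' failure above reports op = 'mul': the gspmm body shown in each traceback canonicalizes the message op before dispatch, turning 'sub' into 'add' with a negated right operand and 'div' into 'mul' with a reciprocal, so the backend only needs add/mul (plus copy) kernels. A self-contained check of that algebraic rewrite, using plain torch tensors rather than the test's F backend wrappers:

    import torch

    u = torch.rand(4, 3) + 1   # stand-ins for node and edge features
    e = torch.rand(4, 3) + 1

    # sub -> add with negated rhs; div -> mul with reciprocal rhs
    assert torch.allclose(u - e, u + (-e))
    assert torch.allclose(u / e, u * (1.0 / e))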
______________________ test_spmm[idtype1-min-div-shp5-g1] ______________________
idtype = torch.int64, shp = ((), ()), msg = 'div', reducer = 'min'
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
u shape: torch.Size([30]), e shape: torch.Size([300])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

___________________ test_spmm[idtype1-min-copy_lhs-shp0-g0] ____________________
idtype = torch.int64, shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_lhs', reducer = 'min'
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1])
    [same test body and traceback; op = 'copy_lhs' is passed through unchanged]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

___________________ test_spmm[idtype1-min-copy_lhs-shp0-g1] ____________________
idtype = torch.int64, shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_lhs', reducer = 'min'
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

___________________ test_spmm[idtype1-min-copy_lhs-shp1-g0] ____________________
idtype = torch.int64, shp = ((3, 3), (1, 3)), msg = 'copy_lhs', reducer = 'min'
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)})
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

___________________ test_spmm[idtype1-min-copy_lhs-shp1-g1] ____________________
idtype = torch.int64, shp = ((3, 3), (1, 3)), msg = 'copy_lhs', reducer = 'min'
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

___________________ test_spmm[idtype1-min-copy_lhs-shp2-g0] ____________________
idtype = torch.int64, shp = ((1,), (3,)), msg = 'copy_lhs', reducer = 'min'
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)})
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

___________________ test_spmm[idtype1-min-copy_lhs-shp2-g1] ____________________
idtype = torch.int64, shp = ((1,), (3,)), msg = 'copy_lhs', reducer = 'min'
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

___________________ test_spmm[idtype1-min-copy_lhs-shp3-g0] ____________________
idtype = torch.int64, shp = ((3,), (1,)), msg = 'copy_lhs', reducer = 'min'
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

___________________ test_spmm[idtype1-min-copy_lhs-shp3-g1] ____________________
idtype = torch.int64, shp = ((3,), (1,)), msg = 'copy_lhs', reducer = 'min'
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

___________________ test_spmm[idtype1-min-copy_lhs-shp4-g0] ____________________
idtype = torch.int64, shp = ((1,), (1,)), msg = 'copy_lhs', reducer = 'min'
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

___________________ test_spmm[idtype1-min-copy_lhs-shp4-g1] ____________________
idtype = torch.int64, shp = ((1,), (1,)), msg = 'copy_lhs', reducer = 'min'
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError

___________________ test_spmm[idtype1-min-copy_lhs-shp5-g0] ____________________
idtype = torch.int64, shp = ((), ()), msg = 'copy_lhs', reducer = 'min'
g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)})
u shape: torch.Size([30]), e shape: torch.Size([100])
    [same test body and traceback]
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:720: TypeError
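The shp pairs driving these cases, from ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)) down to ((), ()), are chosen so that the node-feature and edge-feature trailing dimensions broadcast against each other: on every axis the two sizes are equal or one of them is 1. A small NumPy illustration of the shapes the test constructs (sizes taken from the shp0/g0 case above; np.broadcast_shapes is used only for display and requires NumPy >= 1.20):

    import numpy as np

    num_src_nodes, num_edges = 30, 100
    hu = np.random.rand(*((num_src_nodes,) + (1, 2, 1, 3, 1))) + 1
    he = np.random.rand(*((num_edges,) + (4, 1, 3, 1, 1))) + 1

    # Per-edge messages pair one hu row with one he row; their feature
    # dimensions broadcast elementwise to a common shape.
    print(np.broadcast_shapes(hu.shape[1:], he.shape[1:]))  # (4, 2, 3, 3, 1)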
The twelve min/copy_rhs parametrizations that follow fail at the same line,
python/dgl/backend/pytorch/sparse.py:720, with the identical
TypeError: empty_context() got an unexpected keyword argument 'enabled'.
Only the test id and the printed u/e shapes differ:

test_spmm[idtype1-min-copy_rhs-shp0-g0]   u: [30, 1, 2, 1, 3, 1]   e: [100, 4, 1, 3, 1, 1]
test_spmm[idtype1-min-copy_rhs-shp0-g1]   u: [30, 1, 2, 1, 3, 1]   e: [300, 4, 1, 3, 1, 1]
test_spmm[idtype1-min-copy_rhs-shp1-g0]   u: [30, 3, 3]            e: [100, 1, 3]
test_spmm[idtype1-min-copy_rhs-shp1-g1]   u: [30, 3, 3]            e: [300, 1, 3]
test_spmm[idtype1-min-copy_rhs-shp2-g0]   u: [30, 1]               e: [100, 3]
test_spmm[idtype1-min-copy_rhs-shp2-g1]   u: [30, 1]               e: [300, 3]
test_spmm[idtype1-min-copy_rhs-shp3-g0]   u: [30, 3]               e: [100, 1]
test_spmm[idtype1-min-copy_rhs-shp3-g1]   u: [30, 3]               e: [300, 1]
test_spmm[idtype1-min-copy_rhs-shp4-g0]   u: [30, 1]               e: [100, 1]
test_spmm[idtype1-min-copy_rhs-shp4-g1]   u: [30, 1]               e: [300, 1]
test_spmm[idtype1-min-copy_rhs-shp5-g0]   u: [30]                  e: [100]
test_spmm[idtype1-min-copy_rhs-shp5-g1]   u: [30]                  e: [300]
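A second thing visible in these traces is gspmm's operator lowering: 'sub' is rewritten to 'add' with a negated right operand, and 'div' to 'mul' with a reciprocal, which is why the max-sub trace at the end of this log reports op = 'add' and negative rhs_data values (its grad_fn repr appears to have been stripped by the log renderer). The rewrite is a plain algebraic identity; a standalone check in ordinary PyTorch, independent of DGL and purely illustrative:

    import torch

    lhs = torch.rand(4) + 1    # strictly positive, like the test features
    rhs = torch.rand(4) + 1

    # 'sub' -> 'add' with negated rhs; 'div' -> 'mul' with reciprocal rhs
    assert torch.allclose(lhs - rhs, lhs + (-rhs))
    assert torch.allclose(lhs / rhs, lhs * (1.0 / rhs))

Presumably the lowering exists so the backend only has to ship add/mul message kernels and gets sub/div for free.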
The max-reducer block repeats the same failure for every message function;
each test below aborts at python/dgl/backend/pytorch/sparse.py:720 with the
same TypeError before any SpMM result is produced:

test_spmm[idtype1-max-add-shp0-g0]   u: [30, 1, 2, 1, 3, 1]   e: [100, 4, 1, 3, 1, 1]
test_spmm[idtype1-max-add-shp0-g1]   u: [30, 1, 2, 1, 3, 1]   e: [300, 4, 1, 3, 1, 1]
test_spmm[idtype1-max-add-shp1-g0]   u: [30, 3, 3]            e: [100, 1, 3]
test_spmm[idtype1-max-add-shp1-g1]   u: [30, 3, 3]            e: [300, 1, 3]
test_spmm[idtype1-max-add-shp2-g0]   u: [30, 1]               e: [100, 3]
test_spmm[idtype1-max-add-shp2-g1]   u: [30, 1]               e: [300, 3]
test_spmm[idtype1-max-add-shp3-g0]   u: [30, 3]               e: [100, 1]
test_spmm[idtype1-max-add-shp3-g1]   u: [30, 3]               e: [300, 1]
test_spmm[idtype1-max-add-shp4-g0]   u: [30, 1]               e: [100, 1]
test_spmm[idtype1-max-add-shp4-g1]   u: [30, 1]               e: [300, 1]
test_spmm[idtype1-max-add-shp5-g0]   u: [30]                  e: [100]
test_spmm[idtype1-max-add-shp5-g1]   u: [30]                  e: [300]
test_spmm[idtype1-max-sub-shp0-g0]   u: [30, 1, 2, 1, 3, 1]   e: [100, 4, 1, 3, 1, 1]
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-sub-shp0-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[[[[[1.7029], [1.8835], [1.6903]]], [[[1.6594], [1.9518], ...6]]], [[[1.1303], [1.2447], [1.1095]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.8827]], [[-1.1118]], [[-1.8283]]]], [[[[-1.5953]], [[-... [[[[-1.8634]], [[-1.8155]], [[-1.9792]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-sub-shp1-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[[1.6894, 1.4038, 1.8861], [1.2866, 1.0783, 1.1571], [1.5827, 1.0922, 1.8288]], [[1...1657], [1.8921, 1.2126, 1.0891], [1.0354, 1.5819, 1.9253]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[-1.3932, -1.2954, -1.8085]], [[-1.7726, -1.9924, -1.3538]], [[-1.9603, -1.3349, -1.0110]],...1.8100, -1.6053, -1.5374]], [[-1.4049, -1.4442, -1.0912]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-sub-shp1-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[[1.7460, 1.0200, 1.4781], [1.2512, 1.0960, 1.3278], [1.5788, 1.2799, 1.7301]], [[1...4963], [1.8149, 1.5038, 1.3788], [1.6144, 1.8743, 1.4866]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[-1.4096, -1.7699, -1.0804]], [[-1.2115, -1.7428, -1.7657]], [[-1.0424, -1.9679, -1.8557]],...1.7218, -1.2556, -1.7638]], [[-1.2145, -1.6339, -1.4866]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-sub-shp2-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.5233], [1.0383], [1.6685], [1.3123], [1.6971], [1.7692], [1...275], [1.5620], [1.4277], [1.9463], [1.2396]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.8849, -1.2206, -1.9895], [-1.3651, -1.4547, -1.4787], [-1.4676, -1.1346, -1.9223], ... [-1.4135, -1.0864, -1.0676], [-1.6173, -1.3113, -1.0025]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-sub-shp2-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.3269], [1.8170], [1.2932], [1.3458], [1.4548], [1.9844], [1...777], [1.2890], [1.3291], [1.1746], [1.8865]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.8781, -1.5377, -1.9643], [-1.0779, -1.1751, -1.8633], [-1.5891, -1.4920, -1.3847], ... [-1.6692, -1.9191, -1.8837], [-1.6118, -1.2407, -1.4425]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-sub-shp3-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.8000, 1.3116, 1.6744], [1.1415, 1.6735, 1.1104], [1.8738, 1.9800, 1.9700], [1.5741,... 1.5641], [1.3120, 1.9904, 1.3634], [1.0349, 1.8742, 1.0389]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.7586], [-1.5014], [-1.3815], [-1.0268], [-1.6477], [-1.5634], ... [-1.4503], [-1.4542], [-1.1879], [-1.1530]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-sub-shp3-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.1802, 1.2759, 1.1957], [1.3816, 1.6498, 1.6544], [1.8046, 1.2842, 1.9946], [1.1655,... 1.6813], [1.0078, 1.3762, 1.4047], [1.0819, 1.2983, 1.5378]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.7239], [-1.6005], [-1.5605], [-1.9915], [-1.4893], [-1.2960], ... [-1.8262], [-1.9855], [-1.8040], [-1.8762]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-sub-shp4-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.7100], [1.5252], [1.4182], [1.1332], [1.5762], [1.3340], [1...863], [1.3995], [1.0864], [1.7921], [1.6610]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.7362], [-1.7287], [-1.5887], [-1.0973], [-1.4916], [-1.9457], ... [-1.0751], [-1.6149], [-1.6467], [-1.2077]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-sub-shp4-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([[1.2048], [1.7449], [1.3245], [1.6570], [1.3350], [1.4174], [1...675], [1.4222], [1.9315], [1.5076], [1.7682]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.5364], [-1.2326], [-1.4377], [-1.4171], [-1.2268], [-1.7164], ... [-1.7603], [-1.3743], [-1.5605], [-1.0198]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-sub-shp5-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([1.5983, 1.1329, 1.7750, 1.2139, 1.3339, 1.0927, 1.0970, 1.5895, 1.4347, 1.7503, 1.9357, 1.1900, 1.5401...6060, 1.9909, 1.5719, 1.0493, 1.7641, 1.9698, 1.2038, 1.4722, 1.0548], dtype=torch.float64, requires_grad=True) rhs_data = tensor([-1.4369, -1.0159, -1.8098, -1.5402, -1.1044, -1.9850, -1.6287, -1.0428, -1.4526, -1.5362, -1.2344, -1....8536, -1.6362, -1.7124, -1.4280, -1.1415, -1.9517, -1.2309], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-sub-shp5-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'sub', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add', reduce_op = 'max' lhs_data = tensor([1.1225, 1.1868, 1.7393, 1.7282, 1.9819, 1.9571, 1.2376, 1.7159, 1.0739, 1.2552, 1.8539, 1.3830, 1.2693...3839, 1.7508, 1.7325, 1.6026, 1.3937, 1.2544, 1.0734, 1.1874, 1.7093], dtype=torch.float64, requires_grad=True) rhs_data = tensor([-1.9403, -1.7250, -1.6978, -1.1687, -1.0307, -1.0651, -1.5353, -1.4374, -1.9746, -1.4406, -1.4079, -1....6111, -1.1127, -1.0668, -1.5089, -1.2140, -1.2908, -1.1077], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: sub, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp0-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[[[[1.9098], [1.5737], [1.8178]]], [[[1.0649], [1.0954], ...1]]], [[[1.0763], [1.6155], [1.0357]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.5813]], [[1.3061]], [[1.5431]]]], [[[[1.5215]], [[1.956... [[[[1.6801]], [[1.0502]], [[1.4200]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp0-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[[[[1.3739], [1.5778], [1.4334]]], [[[1.5437], [1.0221], ...6]]], [[[1.9586], [1.1774], [1.0344]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2042]], [[1.3376]], [[1.8695]]]], [[[[1.2767]], [[1.157... [[[[1.4955]], [[1.9839]], [[1.8578]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp1-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[1.4787, 1.5902, 1.4231], [1.0788, 1.0627, 1.9366], [1.8821, 1.9349, 1.2776]], [[1...4976], [1.4234, 1.8311, 1.1166], [1.5282, 1.2209, 1.8966]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.5182, 1.8592, 1.7181]], [[1.6052, 1.7119, 1.0001]], [[1.8543, 1.8931, 1.4878]], ...]], [[1.2731, 1.3555, 1.6728]], [[1.0597, 1.5325, 1.5273]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp1-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[1.9579, 1.3735, 1.9051], [1.5647, 1.7732, 1.0863], [1.3590, 1.9908, 1.2813]], [[1...3151], [1.5983, 1.1743, 1.3600], [1.9148, 1.9345, 1.9410]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.3212, 1.8615, 1.8953]], [[1.6423, 1.6517, 1.1731]], [[1.1222, 1.0155, 1.2929]], ...]], [[1.5164, 1.0748, 1.9708]], [[1.9055, 1.4012, 1.1642]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp2-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.0201], [1.1572], [1.6627], [1.5802], [1.0173], [1.8224], [1...653], [1.2132], [1.8409], [1.8934], [1.7112]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9936, 1.3945, 1.0713], [1.0171, 1.0462, 1.6008], [1.5121, 1.6242, 1.1807], [1.6687,... 1.5105], [1.5716, 1.1216, 1.0793], [1.6869, 1.0195, 1.3753]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp2-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.2333], [1.4924], [1.3779], [1.1696], [1.6526], [1.7366], [1...587], [1.6424], [1.8781], [1.6527], [1.3478]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3368, 1.6306, 1.7414], [1.6420, 1.5462, 1.9042], [1.9471, 1.6855, 1.9585], [1.1235,... 1.8532], [1.9351, 1.7820, 1.9041], [1.5047, 1.4704, 1.5321]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp3-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.6656, 1.7254, 1.8844], [1.2399, 1.1594, 1.5333], [1.6897, 1.4970, 1.0427], [1.6956,... 1.0559], [1.3066, 1.2406, 1.3969], [1.1751, 1.4406, 1.6484]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8189], [1.5455], [1.3785], [1.3565], [1.1803], [1.2639], [1...327], [1.7538], [1.9322], [1.7509], [1.3283]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp3-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.8312, 1.9835, 1.9149], [1.3803, 1.8956, 1.4052], [1.8990, 1.8760, 1.9748], [1.0122,... 1.3484], [1.9224, 1.2670, 1.3520], [1.7785, 1.6861, 1.2813]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.0812], [1.2130], [1.3799], [1.3315], [1.4084], [1.9466], [1...796], [1.1258], [1.5135], [1.5839], [1.4384]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp4-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.4553], [1.8559], [1.8673], [1.3657], [1.3738], [1.9656], [1...767], [1.7624], [1.6802], [1.2979], [1.9216]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.2836], [1.3746], [1.9275], [1.6442], [1.1895], [1.6975], [1...476], [1.3855], [1.9597], [1.6156], [1.2376]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp4-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.1072], [1.8898], [1.4726], [1.4847], [1.7249], [1.2015], [1...258], [1.9639], [1.2709], [1.1926], [1.2269]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3875], [1.3281], [1.0289], [1.8613], [1.7606], [1.0527], [1...497], [1.4708], [1.8926], [1.2815], [1.4875]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp5-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([1.8228, 1.4628, 1.0627, 1.9842, 1.8110, 1.9584, 1.6966, 1.3928, 1.3049, 1.0431, 1.8943, 1.8421, 1.3337...6775, 1.4457, 1.3016, 1.2051, 1.7284, 1.7926, 1.8388, 1.7243, 1.3664], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.6584, 1.1367, 1.3239, 1.4219, 1.8897, 1.5108, 1.2809, 1.1678, 1.7880, 1.4216, 1.9889, 1.1868, 1.5045...6594, 1.0755, 1.8693, 1.7436, 1.7697, 1.0679, 1.7268, 1.6327, 1.2088], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-mul-shp5-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'mul', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([1.1289, 1.9071, 1.7042, 1.7680, 1.1742, 1.3646, 1.6350, 1.5063, 1.0565, 1.1061, 1.5828, 1.2087, 1.6920...0777, 1.0799, 1.7273, 1.2265, 1.1330, 1.7749, 1.2509, 1.6410, 1.3465], dtype=torch.float64, requires_grad=True) rhs_data = tensor([1.1887, 1.1863, 1.9326, 1.1716, 1.3959, 1.0906, 1.1791, 1.6346, 1.9712, 1.7942, 1.8512, 1.0829, 1.4627...5418, 1.4749, 1.8373, 1.3611, 1.6667, 1.6000, 1.1178, 1.5259, 1.9340], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: mul, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp0-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[[[[1.5065], [1.5879], [1.8572]]], [[[1.7271], [1.0511], ...1]]], [[[1.7830], [1.0518], [1.6875]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.8295]], [[0.5608]], [[0.6024]]]], [[[[0.8998]], [[0.762... [[[[0.5992]], [[0.9854]], [[0.5038]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp0-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[[[[1.0469], [1.2695], [1.1033]]], [[[1.6891], [1.6108], ...9]]], [[[1.7035], [1.9591], [1.3243]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.7181]], [[0.5536]], [[0.5720]]]], [[[[0.6747]], [[0.807... [[[[0.5243]], [[0.5440]], [[0.5116]]]]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp1-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[1.8480, 1.5246, 1.8162], [1.9912, 1.7947, 1.1639], [1.6294, 1.9936, 1.9350]], [[1...2541], [1.5638, 1.7415, 1.1112], [1.0334, 1.5712, 1.4737]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[0.8791, 0.5833, 0.9851]], [[0.6638, 0.5310, 0.6871]], [[0.6567, 0.7700, 0.6086]], ... [[0.7471, 0.6909, 0.6189]], [[0.9870, 0.5232, 0.7054]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp1-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[[1.8785, 1.8400, 1.3872], [1.9358, 1.8541, 1.4249], [1.1472, 1.9783, 1.2147]], [[1...8789], [1.6483, 1.6969, 1.0370], [1.3156, 1.9904, 1.0736]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[0.6247, 0.6437, 0.8087]], [[0.5649, 0.5420, 0.5590]], [[0.6325, 0.9158, 0.5496]], ... [[0.9229, 0.5861, 0.5790]], [[0.6952, 0.6505, 0.6407]]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp2-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.6058], [1.2850], [1.7664], [1.0440], [1.1379], [1.5469], [1...724], [1.9767], [1.8668], [1.0180], [1.1543]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.9361, 0.8939, 0.8985], [0.9070, 0.5157, 0.8261], [0.7529, 0.5484, 0.5751], [0.5264,...439], [0.7259, 0.5097, 0.5092], [0.8728, 0.8070, 0.6599]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp2-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.3993], [1.3127], [1.9887], [1.2151], [1.9212], [1.1342], [1...639], [1.1285], [1.3661], [1.1923], [1.9079]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.9755, 0.7047, 0.8047], [0.9307, 0.6794, 0.6001], [0.8105, 0.6918, 0.6278], [0.9147,...465], [0.6554, 0.5553, 0.5074], [0.6860, 0.6799, 0.9490]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp3-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.7220, 1.0853, 1.9818], [1.6752, 1.4155, 1.6388], [1.7989, 1.4143, 1.4347], [1.0349,... 1.0431], [1.1862, 1.5197, 1.5672], [1.4070, 1.1962, 1.6013]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.5746], [0.8018], [0.7010], [0.7842], [0.8923], [0.6273], [0..., [0.9487], [0.5450], [0.6363], [0.9175]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp3-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.0140, 1.2150, 1.5346], [1.4177, 1.7949, 1.4927], [1.2190, 1.1698, 1.5771], [1.7999,... 1.8808], [1.3981, 1.7044, 1.5008], [1.6933, 1.1393, 1.4932]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.7741], [0.5554], [0.7943], [0.6672], [0.9120], [0.7979], [0..., [0.7899], [0.5359], [0.5827], [0.7615]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp4-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.1506], [1.0079], [1.5863], [1.4433], [1.5960], [1.8890], [1...922], [1.7279], [1.7274], [1.0397], [1.3814]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.7358], [0.7896], [0.5068], [0.6050], [0.7768], [0.8811], [0..., [0.5378], [0.5518], [0.6372], [0.5237]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp4-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([[1.4116], [1.2072], [1.5707], [1.7063], [1.0849], [1.3534], [1...862], [1.2946], [1.0582], [1.3900], [1.0174]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.7741], [0.8095], [0.7370], [0.6995], [0.5593], [0.5312], [0..., [0.5977], [0.7064], [0.5108], [0.8706]], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 1]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp5-g0] ______________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) shp = ((), ()), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([1.2840, 1.5758, 1.8906, 1.5873, 1.6298, 1.1616, 1.8868, 1.1453, 1.8178, 1.1098, 1.4267, 1.3660, 1.8151...0248, 1.6071, 1.7271, 1.2412, 1.6134, 1.4371, 1.2423, 1.6250, 1.7911], dtype=torch.float64, requires_grad=True) rhs_data = tensor([0.5889, 0.9095, 0.7789, 0.6681, 0.8375, 0.8557, 0.7819, 0.9678, 0.5665, 0.9792, 0.7127, 0.6073, 0.5057..., 0.7798, 0.6902, 0.5120, 0.9838, 0.7712, 0.5186, 0.7021, 0.7612], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([100]) SpMM(message func: div, reduce func: max) ______________________ test_spmm[idtype1-max-div-shp5-g1] ______________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((), ()), msg = 'div', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'max' lhs_data = tensor([1.7436, 1.1943, 1.3728, 1.6367, 1.9111, 1.6654, 1.0459, 1.8163, 1.5111, 1.8469, 1.8584, 1.3269, 1.9050...3599, 1.7262, 1.8937, 1.1168, 1.8266, 1.8010, 1.2281, 1.4239, 1.0357], dtype=torch.float64, requires_grad=True) rhs_data = tensor([0.9574, 0.5579, 0.9291, 0.7337, 0.9720, 0.5486, 0.5915, 0.6935, 0.6016, 0.5948, 0.8707, 0.9778, 0.8813..., 0.8513, 0.6341, 0.8844, 0.8091, 0.6132, 0.6292, 0.5389, 0.6101], dtype=torch.float64, grad_fn=) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30]), e shape: torch.Size([300]) SpMM(message func: div, reduce func: max) ___________________ test_spmm[idtype1-max-copy_lhs-shp0-g0] ____________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[[[[[1.5762], [1.1993], [1.6049]]], [[[1.5063], [1.9017], ...2]]], [[[1.8009], [1.5730], [1.2475]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.6567]], [[1.2313]], [[1.9511]]]], [[[[1.4108]], [[1.993... [[[[1.6033]], [[1.4304]], [[1.6380]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([100, 4, 1, 3, 1, 1]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype1-max-copy_lhs-shp0-g1] ____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[[[[[1.4200], [1.2955], [1.4388]]], [[[1.6827], [1.9414], ...6]]], [[[1.2641], [1.1193], [1.8508]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.1072]], [[1.8057]], [[1.4448]]]], [[[[1.9204]], [[1.718... [[[[1.7673]], [[1.0985]], [[1.7253]]]]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1, 2, 1, 3, 1]), e shape: torch.Size([300, 4, 1, 3, 1, 1]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype1-max-copy_lhs-shp1-g0] ____________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) shp = ((3, 3), (1, 3)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[[1.1098, 1.5718, 1.3898], [1.4553, 1.4995, 1.0077], [1.5388, 1.2813, 1.4717]], [[1...3816], [1.4732, 1.3292, 1.0252], [1.4471, 1.3823, 1.0789]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.7284, 1.9422, 1.3451]], [[1.4416, 1.7289, 1.6900]], [[1.6003, 1.5733, 1.1725]], ...]], [[1.4106, 1.3942, 1.7273]], [[1.4111, 1.8861, 1.2580]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([100, 1, 3]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype1-max-copy_lhs-shp1-g1] ____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3, 3), (1, 3)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[[1.4826, 1.7108, 1.1790], [1.9217, 1.7443, 1.1297], [1.7688, 1.1305, 1.8011]], [[1...6918], [1.9311, 1.1984, 1.7968], [1.9070, 1.5666, 1.0936]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[1.1752, 1.5311, 1.5241]], [[1.2303, 1.3519, 1.8960]], [[1.9373, 1.6842, 1.9673]], ...]], [[1.5745, 1.1065, 1.9418]], [[1.9953, 1.0088, 1.0581]]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 3, 3]), e shape: torch.Size([300, 1, 3]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype1-max-copy_lhs-shp2-g0] ____________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((1,), (3,)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[1.4019], [1.3641], [1.7696], [1.0823], [1.9762], [1.4675], [1...420], [1.8417], [1.3024], [1.4801], [1.1841]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.0447, 1.9978, 1.1113], [1.9258, 1.8309, 1.9743], [1.5400, 1.0470, 1.0627], [1.5777,... 1.0082], [1.2106, 1.0517, 1.9206], [1.4622, 1.5070, 1.3354]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1, 3), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([100, 3]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype1-max-copy_lhs-shp2-g1] ____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (3,)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[1.5005], [1.2116], [1.1906], [1.3601], [1.9966], [1.0834], [1...413], [1.7849], [1.8476], [1.3743], [1.4578]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7152, 1.4286, 1.3644], [1.1512, 1.9979, 1.7252], [1.3022, 1.9409, 1.3333], [1.2775,... 1.3597], [1.1966, 1.9924, 1.7431], [1.6741, 1.5632, 1.7706]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 u shape: torch.Size([30, 1]), e shape: torch.Size([300, 3]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype1-max-copy_lhs-shp3-g0] ____________________ idtype = torch.int64 g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (1,)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[1.8277, 1.4389, 1.5491], [1.2772, 1.8915, 1.4389], [1.7355, 1.7258, 1.5632], [1.5374,... 1.5375], [1.1187, 1.1519, 1.3438], [1.6218, 1.6354, 1.1951]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.4946], [1.7981], [1.5549], [1.9183], [1.9975], [1.8257], [1...390], [1.1491], [1.5506], [1.1595], [1.6848]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:720: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 u shape: torch.Size([30, 3]), e shape: torch.Size([100, 1]) SpMM(message func: copy_lhs, reduce func: max) ___________________ test_spmm[idtype1-max-copy_lhs-shp3-g1] ____________________ idtype = torch.int64 g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (1,)), msg = 'copy_lhs', reducer = 'max' @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', spmm_shapes) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs']) @pytest.mark.parametrize('reducer', ['sum', 'min', 'max']) @parametrize_idtype def test_spmm(idtype, g, shp, msg, reducer): g = g.astype(idtype).to(F.ctx()) print(g) print(g.idtype) hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1) he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1) print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he))) g.srcdata['x'] = F.attach_grad(F.clone(hu)) g.edata['w'] = F.attach_grad(F.clone(he)) print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer)) u = F.attach_grad(F.clone(hu)) e = F.attach_grad(F.clone(he)) with F.record_grad(): > v = gspmm(g, msg, reducer, u, e) tests/compute/test_sparse.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs', reduce_op = 'max' lhs_data = tensor([[1.8943, 1.4179, 1.8289], [1.7115, 1.6309, 1.7848], [1.4635, 1.6862, 1.4046], [1.8447,... 1.9818], [1.8169, 1.1324, 1.8279], [1.8363, 1.9885, 1.4305]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9651], [1.8914], [1.9935], [1.3784], [1.0007], [1.8512], [1...028], [1.5418], [1.0176], [1.4288], [1.9665]], dtype=torch.float64, requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300},
      metagraph=[('_U', '_V', '_E')])
torch.int64
u shape: torch.Size([30, 3]), e shape: torch.Size([300, 1])
SpMM(message func: copy_lhs, reduce func: max)
___________________ test_spmm[idtype1-max-copy_lhs-shp4-g0] ____________________

idtype = torch.int64
g = Graph(num_nodes=30, num_edges=100,
      ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64)}
      edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1,), (1,)), msg = 'copy_lhs', reducer = 'max'

    @pytest.mark.parametrize('g', graphs)
    @pytest.mark.parametrize('shp', spmm_shapes)
    @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'copy_lhs', 'copy_rhs'])
    @pytest.mark.parametrize('reducer', ['sum', 'min', 'max'])
    @parametrize_idtype
    def test_spmm(idtype, g, shp, msg, reducer):
        g = g.astype(idtype).to(F.ctx())
        print(g)
        print(g.idtype)
        hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1)
        he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1)
        print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he)))
        g.srcdata['x'] = F.attach_grad(F.clone(hu))
        g.edata['w'] = F.attach_grad(F.clone(he))
        print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer))
        u = F.attach_grad(F.clone(hu))
        e = F.attach_grad(F.clone(he))
        with F.record_grad():
>           v = gspmm(g, msg, reducer, u, e)

tests/compute/test_sparse.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'max'
lhs_data = tensor([[1.9108], [1.9152], [1.2647], [1.6213], [1.8683], [1.3377],
           [1...391], [1.7813], [1.6688], [1.0444], [1.9414]],
           dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.4416], [1.7016], [1.1309], [1.5423], [1.2940], [1.8413],
           [1...559], [1.8790], [1.2780], [1.7550], [1.8977]],
           dtype=torch.float64, requires_grad=True)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100,
      ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64)}
      edata_schemes={'w': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
u shape: torch.Size([30, 1]), e shape: torch.Size([100, 1])
SpMM(message func: copy_lhs, reduce func: max)
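Every failure in this run shares that traceback: the wrapper builds `args` with `_cast_if_autocast_enabled` and then enters `with autocast(enabled=False):`, but `autocast` here resolves to a keyword-less fallback named `empty_context`. The error is reproducible in isolation; the sketch below is standalone, with an illustrative `empty_context` rather than DGL's actual shim:

```python
from contextlib import contextmanager

@contextmanager
def empty_context():
    # Keyword-less fallback standing in for torch's autocast
    # when AMP support is unavailable; note it takes no arguments.
    yield

autocast = empty_context  # what the failing code path effectively does

# Called the way the real autocast is called:
try:
    with autocast(enabled=False):
        pass
except TypeError as err:
    print(err)  # empty_context() got an unexpected keyword argument 'enabled'
```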
[The 15 remaining test_spmm parametrizations in this run fail identically at
python/dgl/backend/pytorch/sparse.py:720 — same gspmm source, same traceback,
same TypeError; per-case captured stdout reduced to test id and feature shapes:]
FAILED test_spmm[idtype1-max-copy_lhs-shp4-g1] - u: [30, 1], e: [300, 1]
FAILED test_spmm[idtype1-max-copy_lhs-shp5-g0] - u: [30], e: [100]
FAILED test_spmm[idtype1-max-copy_lhs-shp5-g1] - u: [30], e: [300]
FAILED test_spmm[idtype1-max-copy_rhs-shp0-g0] - u: [30, 1, 2, 1, 3, 1], e: [100, 4, 1, 3, 1, 1]
FAILED test_spmm[idtype1-max-copy_rhs-shp0-g1] - u: [30, 1, 2, 1, 3, 1], e: [300, 4, 1, 3, 1, 1]
FAILED test_spmm[idtype1-max-copy_rhs-shp1-g0] - u: [30, 3, 3], e: [100, 1, 3]
FAILED test_spmm[idtype1-max-copy_rhs-shp1-g1] - u: [30, 3, 3], e: [300, 1, 3]
FAILED test_spmm[idtype1-max-copy_rhs-shp2-g0] - u: [30, 1], e: [100, 3]
FAILED test_spmm[idtype1-max-copy_rhs-shp2-g1] - u: [30, 1], e: [300, 3]
FAILED test_spmm[idtype1-max-copy_rhs-shp3-g0] - u: [30, 3], e: [100, 1]
FAILED test_spmm[idtype1-max-copy_rhs-shp3-g1] - u: [30, 3], e: [300, 1]
FAILED test_spmm[idtype1-max-copy_rhs-shp4-g0] - u: [30, 1], e: [100, 1]
FAILED test_spmm[idtype1-max-copy_rhs-shp4-g1] - u: [30, 1], e: [300, 1]
FAILED test_spmm[idtype1-max-copy_rhs-shp5-g0] - u: [30], e: [100]
FAILED test_spmm[idtype1-max-copy_rhs-shp5-g1] - u: [30], e: [300]
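The test_sddmm half of the run (below) fails one frame later in the same file, python/dgl/backend/pytorch/sparse.py:731, for the same reason. A minimal sketch of the kind of fix this points to — letting the fallback accept and ignore the arguments the real autocast takes — assuming the shim is a plain context manager; the actual patch may differ:

```python
from contextlib import contextmanager

@contextmanager
def empty_context(*args, **kwargs):
    # Accept and ignore whatever the real torch autocast signature
    # passes (e.g. enabled=..., dtype=...), so callers can use one
    # calling convention regardless of which context they got.
    yield

# Both call sites in the tracebacks can then run unchanged:
with empty_context(enabled=False):  # now a harmless no-op
    pass
```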
_____________________ test_sddmm[idtype0-add-u-v-shp0-g0] ______________________

g = Graph(num_nodes=30, num_edges=100,
      ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64),
                     'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}
      edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)})
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u'
msg = 'add', idtype = torch.int32

    @pytest.mark.parametrize('g', graphs)
    @pytest.mark.parametrize('shp', sddmm_shapes)
    @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e'])
    @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e'])
    @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs'])
    @parametrize_idtype
    def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype):
        if lhs_target == rhs_target:
            return
        g = g.astype(idtype).to(F.ctx())
        if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0:
            pytest.skip()  # mxnet do not support zero shape tensor
        print(g)
        print(g.idtype)
        len_lhs = select(
            lhs_target,
            g.number_of_src_nodes(),
            g.number_of_edges(),
            g.number_of_dst_nodes())
        lhs_shp = (len_lhs,) + shp[0]
        len_rhs = select(
            rhs_target,
            g.number_of_src_nodes(),
            g.number_of_edges(),
            g.number_of_dst_nodes())
        rhs_shp = (len_rhs,) + shp[1]
        feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1)
        feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1)
        print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs)))
        lhs_frame = select(
            lhs_target,
            g.srcdata,
            g.edata,
            g.dstdata)
        rhs_frame = select(
            rhs_target,
            g.srcdata,
            g.edata,
            g.dstdata)
        lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs))
        rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs))
        msg_func = lhs_target + '_' + msg + '_' + rhs_target
        print('SDDMM(message func: {})'.format(msg_func))
        lhs = F.attach_grad(F.clone(feat_lhs))
        rhs = F.attach_grad(F.clone(feat_rhs))
        with F.record_grad():
>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[[[[[1.8359], [1.4310], [1.2108]]], [[[1.4269], [1.5427],
           ...0]]], [[[1.4066], [1.5816], [1.1979]]]]]],
           dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.6500]], [[1.1763]], [[1.1556]]]], [[[[1.6716]], [[1.750...
           [[[[1.0850]], [[1.9148]], [[1.2829]]]]]],
           dtype=torch.float64, requires_grad=True)
lhs_target = 'v', rhs_target = 'u'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100,
      ndata_schemes={'x': Scheme(shape=(), dtype=torch.float64)}
      edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: v_add_u)
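For reference, both wrappers in these tracebacks follow the standard recipe for autocast-incompatible custom ops: cast the inputs once up front if autocast is active, then run the kernel with autocast disabled so nothing is re-cast inside. A schematic of that pattern — the body of `_cast_if_autocast_enabled` and the `gsddmm_like` name are assumptions based on the call sites, not DGL's exact code:

```python
import torch
from torch.cuda.amp import autocast

def _cast_if_autocast_enabled(*args):
    # Assumed behavior: under autocast, cast floating-point tensors
    # to the autocast dtype once; pass everything else through.
    if not torch.is_autocast_enabled():
        return args
    dtype = torch.get_autocast_gpu_dtype()
    return tuple(
        a.to(dtype) if torch.is_tensor(a) and a.is_floating_point() else a
        for a in args)

def gsddmm_like(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data,
                                     lhs_target, rhs_target)
    # Disable autocast around the kernel so the pre-cast inputs are
    # used as-is; this is the `with` that raises when `autocast` is
    # actually the keyword-less fallback (sparse.py:731 above).
    with autocast(enabled=False):
        return args  # placeholder for the real SDDMM kernel call
```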
[The remaining test_sddmm[idtype0-add-u-v-*] cases fail identically at
python/dgl/backend/pytorch/sparse.py:731; per-case captured stdout reduced to
test id and lhs/rhs shapes:]
FAILED test_sddmm[idtype0-add-u-v-shp0-g1] - lhs: [40, 1, 2, 1, 3, 1], rhs: [30, 4, 1, 3, 1, 1]
FAILED test_sddmm[idtype0-add-u-v-shp1-g0] - lhs: [30, 5, 3, 1, 7], rhs: [30, 1, 3, 7, 7]
FAILED test_sddmm[idtype0-add-u-v-shp1-g1] - lhs: [40, 5, 3, 1, 7], rhs: [30, 1, 3, 7, 7]
FAILED test_sddmm[idtype0-add-u-v-shp2-g0] - lhs: [30, 1, 3, 3], rhs: [30, 4, 1, 3]
FAILED test_sddmm[idtype0-add-u-v-shp2-g1] - lhs: [40, 1, 3, 3], rhs: [30, 4, 1, 3]
FAILED test_sddmm[idtype0-add-u-v-shp3-g0] - lhs: [30, 3], rhs: [30, 3]
FAILED test_sddmm[idtype0-add-u-v-shp3-g1] - lhs: [40, 3], rhs: [30, 3]
_____________________ test_sddmm[idtype0-add-u-v-shp4-g0] ______________________

g = Graph(num_nodes=30, num_edges=100,
      ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64),
                     'y': Scheme(shape=(1,), dtype=torch.float64)}
      edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)})
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'add'
idtype = torch.int32

    [test body identical to test_sddmm shown above]
        with F.record_grad():
>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'add'
lhs_data = tensor([[1.7174], [1.7400], [1.3941], [1.3929], [1.6896], [1.1994],
           [1...636], [1.4418], [1.6839], [1.8561], [1.9133]],
           dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.8435], [1.7878], [1.7554], [1.2093], [1.0422], [1.9048],
           [1...564], [1.1978], [1.3405], [1.8498], [1.7777]],
           dtype=torch.float64, requires_grad=True)
lhs_target = 'v', rhs_target = 'u'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype0-add-u-v-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.8147], [1.7229], [1.3779], [1.9968], [1.1666], [1.6118], [1...063], [1.7209], [1.1640], [1.9226], [1.0912]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6949], [1.0458], [1.6857], [1.3732], [1.9773], [1.3937], [1...114], [1.4225], [1.6308], [1.2919], [1.8140]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype0-add-u-e-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap... edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'add', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.8416], [1.7451], [1.1828]]], [[[1.9120], [1.4824], ...9]]], [[[1.5341], [1.6782], [1.3322]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.8472]], [[1.2927]], [[1.3825]]]], [[[[1.9857]], [[1.204... [[[[1.7390]], [[1.7611]], [[1.9820]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_add_u) _____________________ test_sddmm[idtype0-add-u-e-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'add', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.7177], [1.7597], [1.0669]]], [[[1.6947], [1.9365], ...4]]], [[[1.2413], [1.5285], [1.5667]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.3162]], [[1.9923]], [[1.0590]]]], [[[[1.3361]], [[1.168... [[[[1.4483]], [[1.1261]], [[1.7266]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_add_u) _____________________ test_sddmm[idtype0-add-u-e-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap... edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u' msg = 'add', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.8726, 1.3992, 1.5259, ..., 1.4883, 1.5476, 1.5043]], [[1.6301, 1.5875, 1.0300, ..., 1.3956,... [[1.1336, 1.9592, 1.9632, ..., 1.2289, 1.1241, 1.0071]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.6147, 1.2744, 1.4549, ..., 1.7327, 1.9400, 1.2206], [1.0304, 1.0472, 1.2055, ..., 1.9126, 1... [1.4652, 1.6232, 1.8283, ..., 1.0992, 1.7763, 1.9747]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_add_u) _____________________ test_sddmm[idtype0-add-u-e-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u' msg = 'add', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.0488, 1.8265, 1.8601, ..., 1.2549, 1.0371, 1.2858]], [[1.7043, 1.0306, 1.9673, ..., 1.5553,... [[1.0894, 1.1446, 1.4774, ..., 1.2791, 1.1638, 1.1551]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.2022, 1.1100, 1.8475, ..., 1.1508, 1.5684, 1.6108], [1.5413, 1.9142, 1.5643, ..., 1.3332, 1... [1.4284, 1.2672, 1.3725, ..., 1.0818, 1.9778, 1.6328]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_add_u) _____________________ test_sddmm[idtype0-add-u-e-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.6975, 1.2203, 1.2841], [1.2838, 1.3535, 1.1028], [1.7459, 1.5555, 1.7464]]], ...1], [1.4128, 1.8123, 1.9640], [1.8887, 1.3374, 1.2137]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.8155, 1.5865, 1.6584]], [[1.9617, 1.4822, 1.8847]], [[1.1407, 1.8855, 1.8534]], ... [[1.3135, 1.1021, 1.0944]], [[1.2955, 1.2001, 1.4978]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_add_u) _____________________ test_sddmm[idtype0-add-u-e-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.3758, 1.8711, 1.2947], [1.8792, 1.8383, 1.1650], [1.9994, 1.0185, 1.2790]]], ...7], [1.3280, 1.7114, 1.8727], [1.2514, 1.3733, 1.7533]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.9342, 1.2110, 1.4657]], [[1.5014, 1.7505, 1.1415]], [[1.9846, 1.7400, 1.4357]], ... [[1.0199, 1.5805, 1.4450]], [[1.9090, 1.0612, 1.9326]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_add_u) _____________________ test_sddmm[idtype0-add-u-e-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...oat64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.4191, 1.5461, 1.5294], [1.3016, 1.2080, 1.6682], [1.7263, 1.0811, 1.9747], [1.1981,... 1.1303], [1.5304, 1.6140, 1.1568], [1.0741, 1.2795, 1.3087]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5088, 1.4496, 1.5377], [1.8666, 1.7781, 1.2280], [1.6166, 1.7209, 1.5250], [1.6172,... 1.5719], [1.5015, 1.7006, 1.3041], [1.1946, 1.6119, 1.8396]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_add_u) _____________________ test_sddmm[idtype0-add-u-e-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.5037, 1.3300, 1.7545], [1.6087, 1.4279, 1.3494], [1.0710, 1.1794, 1.0113], [1.4892,... 1.4339], [1.9532, 1.8988, 1.1946], [1.4970, 1.4307, 1.7400]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9499, 1.6962, 1.3213], [1.9697, 1.7532, 1.0522], [1.6274, 1.5883, 1.1131], [1.2591,... 1.4200], [1.9621, 1.6811, 1.9538], [1.6598, 1.9181, 1.8434]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_add_u) _____________________ test_sddmm[idtype0-add-u-e-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...oat64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.7855], [1.2114], [1.0233], [1.9180], [1.1531], [1.5277], [1...444], [1.2176], [1.2375], [1.3924], [1.4410]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8389], [1.5678], [1.1868], [1.1853], [1.2061], [1.6218], [1...924], [1.6991], [1.1442], [1.8868], [1.6223]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_add_u) _____________________ test_sddmm[idtype0-add-u-e-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.0078], [1.7341], [1.0185], [1.0480], [1.6414], [1.2923], [1...908], [1.2159], [1.7952], [1.5262], [1.6154]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5128], [1.9214], [1.2815], [1.1572], [1.9905], [1.9704], [1...692], [1.2700], [1.5861], [1.0919], [1.9468]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_add_u) _____________________ test_sddmm[idtype0-add-v-u-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...oat64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'add', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.9567], [1.8737], [1.6358]]], [[[1.3749], [1.7472], ...1]]], [[[1.9226], [1.1402], [1.7328]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.8509]], [[1.4331]], [[1.8933]]]], [[[[1.3092]], [[1.998... [[[[1.6774]], [[1.0543]], [[1.0867]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: u_add_v) _____________________ test_sddmm[idtype0-add-v-u-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'add', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.9250], [1.6382], [1.0213]]], [[[1.3354], [1.1147], ...7]]], [[[1.9414], [1.7082], [1.1554]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.7757]], [[1.1341]], [[1.0502]]]], [[[[1.6224]], [[1.281... [[[[1.5697]], [[1.6469]], [[1.7265]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]) SDDMM(message func: u_add_v) _____________________ test_sddmm[idtype0-add-v-u-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...oat64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v' msg = 'add', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.1243, 1.1527, 1.3101, ..., 1.6840, 1.5638, 1.5521]], [[1.1848, 1.5404, 1.3460, ..., 1.7100,... [[1.5499, 1.6683, 1.4806, ..., 1.3767, 1.3060, 1.4148]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.7090, 1.0981, 1.2779, ..., 1.8151, 1.5244, 1.1344], [1.7141, 1.6868, 1.8750, ..., 1.0729, 1... [1.7753, 1.3926, 1.4929, ..., 1.5204, 1.1154, 1.4568]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: u_add_v) _____________________ test_sddmm[idtype0-add-v-u-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v' msg = 'add', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.1538, 1.1012, 1.4490, ..., 1.7771, 1.6525, 1.9886]], [[1.0263, 1.2944, 1.4714, ..., 1.6066,... [[1.2419, 1.5463, 1.8135, ..., 1.0718, 1.6890, 1.9449]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.8005, 1.7702, 1.6003, ..., 1.3688, 1.6425, 1.8139], [1.6533, 1.3866, 1.8309, ..., 1.4326, 1... [1.5551, 1.7148, 1.9013, ..., 1.0680, 1.9719, 1.4990]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
[The remaining test_sddmm cases fail identically: each traceback runs through
tests/compute/test_sparse.py:207 -> python/dgl/ops/sddmm.py:75 ->
python/dgl/backend/pytorch/sparse.py:731 and raises the same TypeError; only
the parametrized case and the printed shapes differ, as summarized below.]

_____________________ test_sddmm[idtype0-add-v-u-shp1-g1] ______________________
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])
SDDMM(message func: u_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

_____________________ test_sddmm[idtype0-add-v-u-shp2-g0] ______________________
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: u_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

_____________________ test_sddmm[idtype0-add-v-u-shp2-g1] ______________________
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3])
SDDMM(message func: u_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

_____________________ test_sddmm[idtype0-add-v-u-shp3-g0] ______________________
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: u_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

_____________________ test_sddmm[idtype0-add-v-u-shp3-g1] ______________________
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3])
SDDMM(message func: u_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

_____________________ test_sddmm[idtype0-add-v-u-shp4-g0] ______________________
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: u_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

_____________________ test_sddmm[idtype0-add-v-u-shp4-g1] ______________________
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1])
SDDMM(message func: u_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

_____________________ test_sddmm[idtype0-add-v-e-shp0-g0] ______________________
lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
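Aside from the crash, the `gsddmm` body quoted in each traceback shows how operators are canonicalized before dispatch: `sub` becomes `add` with a negated right-hand side, and `div` becomes `mul` with the reciprocal that the `rhs_data = 1. / rhs_data` statement prepares. A quick standalone check of those identities in plain PyTorch; the summary of failing cases continues below the sketch:

```python
import torch

lhs = torch.tensor([4.0, 9.0])
rhs = torch.tensor([2.0, 4.0])  # power-of-two divisors keep the check exact

# 'sub' is dispatched as 'add' with a negated right-hand side ...
assert torch.equal(lhs - rhs, lhs + (-rhs))

# ... and 'div' as 'mul' with the reciprocal of the right-hand side,
# which is what `rhs_data = 1. / rhs_data` computes before dispatch.
assert torch.equal(lhs / rhs, lhs * (1.0 / rhs))
```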
_____________________ test_sddmm[idtype0-add-v-e-shp0-g1] ______________________
lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1])
SDDMM(message func: e_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

_____________________ test_sddmm[idtype0-add-v-e-shp1-g0] ______________________
lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: e_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

_____________________ test_sddmm[idtype0-add-v-e-shp1-g1] ______________________
lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])
SDDMM(message func: e_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

_____________________ test_sddmm[idtype0-add-v-e-shp2-g0] ______________________
lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: e_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

_____________________ test_sddmm[idtype0-add-v-e-shp2-g1] ______________________
lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3])
SDDMM(message func: e_add_v)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
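One plausible shape of the repair, sketched under the assumption that the fallback should keep its no-op behavior (this is not necessarily the actual patch in this PR): let the placeholder accept and discard whatever keyword arguments the real `torch.cuda.amp.autocast` takes, so that `with autocast(enabled=False):` is valid on both code paths. The summary of failing cases resumes after the sketch:

```python
from contextlib import contextmanager

@contextmanager
def empty_context(*args, **kwargs):
    # Hypothetical kwargs-tolerant fallback: swallow arguments such as
    # enabled=False instead of rejecting them, then do nothing.
    yield

autocast = empty_context

# The call shape that crashes above is now legal on the fallback path too.
with autocast(enabled=False):
    pass
```

An alternative with the same effect would be to drop the `enabled=False` argument at the call site when the real autocast is not in use; either way, the failures in this run share this single root cause.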
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_add_v) _____________________ test_sddmm[idtype0-add-v-e-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.4955, 1.5018, 1.8180], [1.6861, 1.6213, 1.6839], [1.8650, 1.4055, 1.4727], [1.9067,... 1.1526], [1.7702, 1.2675, 1.6253], [1.4623, 1.4108, 1.5381]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.4586, 1.0319, 1.0246], [1.4173, 1.6825, 1.3357], [1.5985, 1.1848, 1.2461], [1.2314,... 1.8365], [1.3689, 1.1988, 1.7032], [1.6507, 1.7138, 1.3947]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3]) SDDMM(message func: e_add_v) _____________________ test_sddmm[idtype0-add-v-e-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...oat64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.8919], [1.0691], [1.5827], [1.3674], [1.7022], [1.5541], [1...699], [1.8622], [1.9631], [1.7535], [1.2969]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6937], [1.0336], [1.9698], [1.3445], [1.7812], [1.5435], [1...898], [1.4933], [1.5754], [1.4490], [1.2062]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_add_v) _____________________ test_sddmm[idtype0-add-v-e-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.1057], [1.1243], [1.1071], [1.5136], [1.4289], [1.2135], [1...757], [1.5487], [1.1781], [1.3181], [1.9594]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3934], [1.2186], [1.3258], [1.1676], [1.4463], [1.2933], [1...447], [1.5527], [1.2380], [1.0162], [1.5596]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The remaining test_sddmm failures in this section raise the same TypeError from python/dgl/backend/pytorch/sparse.py:731; the captured test body, traceback, and gsddmm listing are identical to the full listing above, so only each case's message function and feature shapes follow. g0 is the homogeneous Graph(num_nodes=30, num_edges=100); g1 is the heterograph Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}).

test_sddmm[idtype0-add-e-u-shp0-g0]  u_add_e  lhs torch.Size([30, 1, 2, 1, 3, 1])  rhs torch.Size([100, 4, 1, 3, 1, 1])
test_sddmm[idtype0-add-e-u-shp0-g1]  u_add_e  lhs torch.Size([30, 1, 2, 1, 3, 1])  rhs torch.Size([300, 4, 1, 3, 1, 1])
test_sddmm[idtype0-add-e-u-shp1-g0]  u_add_e  lhs torch.Size([30, 5, 3, 1, 7])  rhs torch.Size([100, 1, 3, 7, 7])
test_sddmm[idtype0-add-e-u-shp1-g1]  u_add_e  lhs torch.Size([30, 5, 3, 1, 7])  rhs torch.Size([300, 1, 3, 7, 7])
test_sddmm[idtype0-add-e-u-shp2-g0]  u_add_e  lhs torch.Size([30, 1, 3, 3])  rhs torch.Size([100, 4, 1, 3])
test_sddmm[idtype0-add-e-u-shp2-g1]  u_add_e  lhs torch.Size([30, 1, 3, 3])  rhs torch.Size([300, 4, 1, 3])
test_sddmm[idtype0-add-e-u-shp3-g0]  u_add_e  lhs torch.Size([30, 3])  rhs torch.Size([100, 3])
test_sddmm[idtype0-add-e-u-shp3-g1]  u_add_e  lhs torch.Size([30, 3])  rhs torch.Size([300, 3])
test_sddmm[idtype0-add-e-u-shp4-g0]  u_add_e  lhs torch.Size([30, 1])  rhs torch.Size([100, 1])
test_sddmm[idtype0-add-e-u-shp4-g1]  u_add_e  lhs torch.Size([30, 1])  rhs torch.Size([300, 1])
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: v_add_e) _____________________ test_sddmm[idtype0-add-e-v-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'add', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.6949], [1.0547], [1.2245]]], [[[1.8198], [1.5672], ...2]]], [[[1.4554], [1.0871], [1.0510]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.4745]], [[1.3380]], [[1.9316]]]], [[[[1.3467]], [[1.848... [[[[1.8327]], [[1.3381]], [[1.7677]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]) SDDMM(message func: v_add_e) _____________________ test_sddmm[idtype0-add-e-v-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'add', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.9607, 1.7992, 1.8218, ..., 1.0765, 1.1108, 1.4619]], [[1.7810, 1.2431, 1.4474, ..., 1.9725,... [[1.9884, 1.7395, 1.3100, ..., 1.1142, 1.9321, 1.4091]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.7343, 1.8412, 1.3918, ..., 1.4117, 1.4401, 1.1439], [1.8906, 1.3137, 1.3277, ..., 1.4540, 1... [1.2840, 1.7196, 1.5690, ..., 1.7900, 1.8975, 1.2990]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]) SDDMM(message func: v_add_e) _____________________ test_sddmm[idtype0-add-e-v-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'add', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.5887, 1.4807, 1.8561, ..., 1.4600, 1.3384, 1.6648]], [[1.4489, 1.2764, 1.9063, ..., 1.3104,... [[1.5641, 1.3932, 1.5482, ..., 1.8161, 1.2781, 1.4642]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.7264, 1.6589, 1.8781, ..., 1.0546, 1.2430, 1.9720], [1.3804, 1.8149, 1.7901, ..., 1.9633, 1... 
[1.1517, 1.7804, 1.7061, ..., 1.8603, 1.4115, 1.8675]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: v_add_e) _____________________ test_sddmm[idtype0-add-e-v-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.4317, 1.7778, 1.9126], [1.0413, 1.9944, 1.1500], [1.1820, 1.4158, 1.6368]]], ...4], [1.3945, 1.1196, 1.6662], [1.4486, 1.4908, 1.5227]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.2106, 1.2897, 1.7018]], [[1.5544, 1.1736, 1.0461]], [[1.2291, 1.5262, 1.3196]], ... 
[[1.5052, 1.8467, 1.6291]], [[1.3836, 1.1778, 1.9288]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: v_add_e) _____________________ test_sddmm[idtype0-add-e-v-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.0326, 1.3314, 1.6695], [1.2188, 1.4740, 1.9369], [1.0520, 1.7883, 1.6307]]], ...1], [1.7007, 1.4183, 1.8906], [1.1979, 1.7953, 1.2726]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.4650, 1.4645, 1.5722]], [[1.0848, 1.9536, 1.9555]], [[1.2539, 1.7311, 1.5875]], ... 
[[1.5339, 1.2661, 1.2501]], [[1.1439, 1.4322, 1.3522]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]) SDDMM(message func: v_add_e) _____________________ test_sddmm[idtype0-add-e-v-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.1756, 1.5400, 1.3763], [1.7192, 1.0583, 1.0300], [1.9592, 1.7213, 1.9990], [1.6791,... 1.5240], [1.2370, 1.7238, 1.3083], [1.7558, 1.8783, 1.4392]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1220, 1.7448, 1.5011], [1.5140, 1.7289, 1.1700], [1.8325, 1.8836, 1.8819], [1.5871,... 
1.4215], [1.8532, 1.6484, 1.9266], [1.4379, 1.6479, 1.5266]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3]) SDDMM(message func: v_add_e) _____________________ test_sddmm[idtype0-add-e-v-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'add' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.9384, 1.7409, 1.5343], [1.4391, 1.7512, 1.1807], [1.8738, 1.3219, 1.0369], [1.8073,... 1.6120], [1.2867, 1.4543, 1.5620], [1.0279, 1.4152, 1.9085]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.2564, 1.9507, 1.0500], [1.7466, 1.3935, 1.8556], [1.7761, 1.0994, 1.6592], [1.3661,... 
_____________________ test_sddmm[idtype0-add-e-v-shp3-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'add', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([300, 3])
SDDMM(message func: v_add_e)
_____________________ test_sddmm[idtype0-add-e-v-shp4-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'add', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1])
SDDMM(message func: v_add_e)
_____________________ test_sddmm[idtype0-add-e-v-shp4-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'add', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([300, 1])
SDDMM(message func: v_add_e)
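The gsddmm body shown in these tracebacks rewrites 'sub' into 'add' on a negated rhs and 'div' into 'mul' on a reciprocal before dispatch, which is why the inner frames report op = 'add' with negated rhs_data tensors even for the msg = 'sub' cases that follow. The identities this relies on, checked in a small sketch (illustration only, not DGL code):

    import torch

    lhs = torch.rand(4, 3) + 1
    rhs = torch.rand(4, 3) + 1
    # sub lowered to add of the negation; div lowered to mul of the reciprocal
    assert torch.allclose(lhs - rhs, lhs + (-rhs))
    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))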
_____________________ test_sddmm[idtype0-sub-u-v-shp0-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: v_sub_u)
_____________________ test_sddmm[idtype0-sub-u-v-shp0-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: v_sub_u)
_____________________ test_sddmm[idtype0-sub-u-v-shp1-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: v_sub_u)
_____________________ test_sddmm[idtype0-sub-u-v-shp1-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: v_sub_u)
_____________________ test_sddmm[idtype0-sub-u-v-shp2-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: v_sub_u)
_____________________ test_sddmm[idtype0-sub-u-v-shp2-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: v_sub_u)
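The shp0 and shp1 cases above pair feature shapes such as (1, 2, 1, 3, 1) with (4, 1, 3, 1, 1): only the leading dimension is fixed by the graph (number of source nodes, destination nodes, or edges), and the trailing feature dimensions are broadcast NumPy-style when the per-edge output is formed. The expected broadcast result can be checked directly (a sketch using torch.broadcast_shapes, not DGL internals):

    import torch

    # trailing feature shapes from the shp0 parametrization
    print(torch.broadcast_shapes((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)))
    # torch.Size([4, 2, 3, 3, 1])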
_____________________ test_sddmm[idtype0-sub-u-v-shp3-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: v_sub_u)
_____________________ test_sddmm[idtype0-sub-u-v-shp3-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: v_sub_u)
_____________________ test_sddmm[idtype0-sub-u-v-shp4-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: v_sub_u)
_____________________ test_sddmm[idtype0-sub-u-v-shp4-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: v_sub_u)
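The failing ids all come from a single test: the stacked parametrize decorators on test_sddmm produce the Cartesian product of msg, rhs_target, lhs_target, shp, g and idtype, with the early return skipping the lhs_target == rhs_target combinations. Every surviving combination trips the same TypeError, so the failure count here tracks the size of the grid rather than distinct bugs. The stacking behaviour in miniature (hypothetical test, standard pytest semantics):

    import pytest

    @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e'])
    @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e'])
    def test_pair_grid(lhs_target, rhs_target):
        # mirrors test_sddmm's guard: 3 x 3 = 9 cases collected, 3 return early
        if lhs_target == rhs_target:
            return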
_____________________ test_sddmm[idtype0-sub-u-e-shp0-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_sub_u)
_____________________ test_sddmm[idtype0-sub-u-e-shp0-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_sub_u)
_____________________ test_sddmm[idtype0-sub-u-e-shp1-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: e_sub_u)
_____________________ test_sddmm[idtype0-sub-u-e-shp1-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: e_sub_u)
_____________________ test_sddmm[idtype0-sub-u-e-shp2-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: e_sub_u)
_____________________ test_sddmm[idtype0-sub-u-e-shp2-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u', msg = 'sub', idtype = torch.int32
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: e_sub_u)
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype0-sub-u-e-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'sub' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.7209, 1.2186, 1.4605], [1.7579, 1.5613, 1.0189], [1.8783, 1.8844, 1.0266], [1.4332,... 1.5734], [1.5511, 1.8638, 1.5453], [1.5880, 1.1228, 1.7772]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.3697, -1.9308, -1.5695], [-1.1532, -1.7045, -1.0612], [-1.5865, -1.4985, -1.8168], ... [-1.4845, -1.5549, -1.5169], [-1.7183, -1.4201, -1.7051]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype0-sub-u-e-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'sub' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.3053, 1.0704, 1.6603], [1.0259, 1.8147, 1.1821], [1.4831, 1.5738, 1.5009], [1.4151,... 1.0321], [1.0499, 1.8114, 1.6448], [1.5473, 1.8926, 1.2115]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.8676, -1.8558, -1.2979], [-1.0912, -1.6381, -1.7749], [-1.2295, -1.3842, -1.2967], ... [-1.5675, -1.1446, -1.6194], [-1.8675, -1.6849, -1.1766]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype0-sub-u-e-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'sub' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.0061], [1.1296], [1.3401], [1.7204], [1.7252], [1.1249], [1...056], [1.0773], [1.1206], [1.4145], [1.1420]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.5325], [-1.5684], [-1.5157], [-1.5332], [-1.6570], [-1.1807], ... [-1.2791], [-1.4990], [-1.0937], [-1.8953]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype0-sub-u-e-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'sub' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.8047], [1.9109], [1.2445], [1.1650], [1.3552], [1.9893], [1...630], [1.7468], [1.2085], [1.0908], [1.7873]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.4198], [-1.5899], [-1.5218], [-1.4942], [-1.5583], [-1.4868], ... [-1.2091], [-1.2143], [-1.1258], [-1.5939]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype0-sub-v-u-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'sub', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.8081], [1.4536], [1.4497]]], [[[1.8999], [1.1807], ...2]]], [[[1.3628], [1.2081], [1.9578]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.1370]], [[-1.1055]], [[-1.1089]]]], [[[[-1.9001]], [[-... [[[[-1.4882]], [[-1.3484]], [[-1.5080]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
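Note that by the time the failing line is reached, `op` is always 'add' even though the test parametrizes 'sub': the `gsddmm` wrapper quoted in each traceback canonicalizes the operator first, rewriting `sub` as `add` with a negated right operand and `div` as `mul` with a reciprocal, so the backend only needs add/mul kernels. A standalone check of that rewrite in plain PyTorch; the helper name is illustrative:

    import torch

    def canonicalize(op, rhs):
        # Mirror of the rewrite shown in gsddmm above: sub/div are
        # expressed through add/mul before dispatch.
        if op == 'sub':
            return 'add', -rhs
        if op == 'div':
            return 'mul', 1. / rhs
        return op, rhs

    lhs = torch.rand(4, 3) + 1
    rhs = torch.rand(4, 3) + 1

    op, neg = canonicalize('sub', rhs)
    assert op == 'add' and torch.allclose(lhs + neg, lhs - rhs)

    op, inv = canonicalize('div', rhs)
    assert op == 'mul' and torch.allclose(lhs * inv, lhs / rhs)

This also explains why `rhs_data` carries a grad_fn in the locals dumps: it is the negation of the cloned feature tensor, not the tensor the test created.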
_____________________ test_sddmm[idtype0-sub-v-u-shp0-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v'
msg = 'sub', idtype = torch.int32

    ...
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1])
SDDMM(message func: u_sub_v)
_____________________ test_sddmm[idtype0-sub-v-u-shp1-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v'
msg = 'sub', idtype = torch.int32

    ...
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: u_sub_v)
_____________________ test_sddmm[idtype0-sub-v-u-shp1-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v'
msg = 'sub', idtype = torch.int32

    ...
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])
SDDMM(message func: u_sub_v)
_____________________ test_sddmm[idtype0-sub-v-u-shp2-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v'
msg = 'sub', idtype = torch.int32

    ...
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: u_sub_v)
_____________________ test_sddmm[idtype0-sub-v-u-shp2-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v'
msg = 'sub', idtype = torch.int32

    ...
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3])
SDDMM(message func: u_sub_v)
_____________________ test_sddmm[idtype0-sub-v-u-shp3-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v'
msg = 'sub', idtype = torch.int32

    ...
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: u_sub_v)
_____________________ test_sddmm[idtype0-sub-v-u-shp3-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v'
msg = 'sub', idtype = torch.int32

    ...
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3])
SDDMM(message func: u_sub_v)
_____________________ test_sddmm[idtype0-sub-v-u-shp4-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v'
msg = 'sub', idtype = torch.int32

    ...
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: u_sub_v)
_____________________ test_sddmm[idtype0-sub-v-u-shp4-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v'
msg = 'sub', idtype = torch.int32

    ...
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1])
SDDMM(message func: u_sub_v)
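The leading dimensions of the lhs/rhs shapes printed in each captured stdout follow the operand targets: 'u' selects the source-node count, 'e' the edge count, and 'v' the destination-node count, which is what the test's select(...) helper encodes. A minimal stand-in with the same argument order as the calls shown in the test source; this is assumed behavior inferred from those call sites, not the actual helper from the test utilities:

    def select(target, src_value, edge_value, dst_value):
        # 'u' -> source nodes, 'e' -> edges, 'v' -> destination nodes,
        # matching the leading dimensions in the captured stdout above.
        return {'u': src_value, 'e': edge_value, 'v': dst_value}[target]

    # The heterograph in these runs has 30 src nodes, 300 edges, 40 dst nodes,
    # e.g. lhs shape [30, ...] for target 'u' and rhs shape [40, ...] for 'v'.
    assert select('u', 30, 300, 40) == 30
    assert select('e', 30, 300, 40) == 300
    assert select('v', 30, 300, 40) == 40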
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: u_sub_v) _____________________ test_sddmm[idtype0-sub-v-e-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'sub', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.4307], [1.2255], [1.9464]]], [[[1.8208], [1.8948], ...3]]], [[[1.1838], [1.6442], [1.5326]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.4348]], [[-1.4004]], [[-1.8251]]]], [[[[-1.6433]], [[-... [[[[-1.8061]], [[-1.0103]], [[-1.1006]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_sub_v)

The remaining sub-* parametrizations fail at the same line
(python/dgl/backend/pytorch/sparse.py:731) with the same
TypeError: empty_context() got an unexpected keyword argument 'enabled'.
pytest re-prints the identical test body, locals, and gsddmm source for each
of them; those repeats are elided below and only the distinguishing captured
stdout is kept. g0 is Graph(num_nodes=30, num_edges=100); g1 is the
heterograph Graph(num_nodes={'_U': 30, '_V': 40}, num_edges=300); idtype is
torch.int32 throughout.

test_sddmm[idtype0-sub-v-e-shp0-g1]  lhs torch.Size([300, 1, 2, 1, 3, 1]), rhs torch.Size([40, 4, 1, 3, 1, 1])
test_sddmm[idtype0-sub-v-e-shp1-g0]  lhs torch.Size([100, 5, 3, 1, 7]), rhs torch.Size([30, 1, 3, 7, 7])
test_sddmm[idtype0-sub-v-e-shp1-g1]  lhs torch.Size([300, 5, 3, 1, 7]), rhs torch.Size([40, 1, 3, 7, 7])
test_sddmm[idtype0-sub-v-e-shp2-g0]  lhs torch.Size([100, 1, 3, 3]), rhs torch.Size([30, 4, 1, 3])
test_sddmm[idtype0-sub-v-e-shp2-g1]  lhs torch.Size([300, 1, 3, 3]), rhs torch.Size([40, 4, 1, 3])
test_sddmm[idtype0-sub-v-e-shp3-g0]  lhs torch.Size([100, 3]), rhs torch.Size([30, 3])
test_sddmm[idtype0-sub-v-e-shp3-g1]  lhs torch.Size([300, 3]), rhs torch.Size([40, 3])
test_sddmm[idtype0-sub-v-e-shp4-g0]  lhs torch.Size([100, 1]), rhs torch.Size([30, 1])
test_sddmm[idtype0-sub-v-e-shp4-g1]  lhs torch.Size([300, 1]), rhs torch.Size([40, 1])
All nine run with lhs_target='e', rhs_target='v' and print
SDDMM(message func: e_sub_v) before failing.
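The gsddmm source that pytest echoes for every failure explains why op shows
up in the locals as 'add' even though each test id says sub: the wrapper
canonicalizes the operator before dispatch, folding subtraction into addition
of a negated rhs and division into multiplication by a reciprocal. A quick,
self-contained check of those identities in plain PyTorch (independent of
DGL; the tensor shapes here are arbitrary):

    import torch

    # gsddmm, per the traceback, rewrites:
    #   'sub' -> 'add' with rhs_data = -rhs_data
    #   'div' -> 'mul' with rhs_data = 1. / rhs_data
    lhs = torch.rand(4, 3, dtype=torch.float64) + 1
    rhs = torch.rand(4, 3, dtype=torch.float64) + 1

    assert torch.allclose(lhs - rhs, lhs + (-rhs))       # 'sub' canonicalization
    assert torch.allclose(lhs / rhs, lhs * (1.0 / rhs))  # 'div' canonicalization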
The lhs_target='u', rhs_target='e' parametrizations fail the same way:

test_sddmm[idtype0-sub-e-u-shp0-g0]  lhs torch.Size([30, 1, 2, 1, 3, 1]), rhs torch.Size([100, 4, 1, 3, 1, 1])
test_sddmm[idtype0-sub-e-u-shp0-g1]  lhs torch.Size([30, 1, 2, 1, 3, 1]), rhs torch.Size([300, 4, 1, 3, 1, 1])
test_sddmm[idtype0-sub-e-u-shp1-g0]  lhs torch.Size([30, 5, 3, 1, 7]), rhs torch.Size([100, 1, 3, 7, 7])
test_sddmm[idtype0-sub-e-u-shp1-g1]  lhs torch.Size([30, 5, 3, 1, 7]), rhs torch.Size([300, 1, 3, 7, 7])
test_sddmm[idtype0-sub-e-u-shp2-g0]  lhs torch.Size([30, 1, 3, 3]), rhs torch.Size([100, 4, 1, 3])
test_sddmm[idtype0-sub-e-u-shp2-g1]  lhs torch.Size([30, 1, 3, 3]), rhs torch.Size([300, 4, 1, 3])
test_sddmm[idtype0-sub-e-u-shp3-g0]  lhs torch.Size([30, 3]), rhs torch.Size([100, 3])
test_sddmm[idtype0-sub-e-u-shp3-g1]  lhs torch.Size([30, 3]), rhs torch.Size([300, 3])
test_sddmm[idtype0-sub-e-u-shp4-g0]  lhs torch.Size([30, 1]), rhs torch.Size([100, 1])
test_sddmm[idtype0-sub-e-u-shp4-g1]  lhs torch.Size([30, 1]), rhs torch.Size([300, 1])
All ten print SDDMM(message func: u_sub_e) and raise the identical TypeError
at python/dgl/backend/pytorch/sparse.py:731.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([300, 1]) SDDMM(message func: u_sub_e) _____________________ test_sddmm[idtype0-sub-e-v-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'sub', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.5079], [1.4000], [1.9335]]], [[[1.9383], [1.5437], ...8]]], [[[1.9664], [1.8202], [1.4638]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.2075]], [[-1.5678]], [[-1.9009]]]], [[[[-1.1652]], [[-... [[[[-1.2293]], [[-1.7755]], [[-1.8813]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype0-sub-e-v-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'sub', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.2962], [1.0353], [1.9149]]], [[[1.9003], [1.1320], ...3]]], [[[1.4035], [1.2797], [1.0303]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.9336]], [[-1.7002]], [[-1.9657]]]], [[[[-1.8078]], [[-... [[[[-1.0974]], [[-1.1726]], [[-1.0726]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype0-sub-e-v-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'sub', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.1719, 1.8724, 1.8231, ..., 1.9349, 1.0470, 1.4771]], [[1.2238, 1.9581, 1.0159, ..., 1.3621,... [[1.1410, 1.5489, 1.1582, ..., 1.0887, 1.9237, 1.2107]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[-1.7591, -1.8692, -1.9027, ..., -1.2651, -1.5781, -1.7155], [-1.4262, -1.2508, -1.7612, ..., ... [-1.6187, -1.6674, -1.7329, ..., -1.6512, -1.0597, -1.0828]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype0-sub-e-v-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'sub', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.2609, 1.2447, 1.2461, ..., 1.1007, 1.1796, 1.3989]], [[1.3717, 1.8930, 1.8988, ..., 1.9137,... [[1.8410, 1.0951, 1.0935, ..., 1.4584, 1.7870, 1.5326]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[-1.0897, -1.6390, -1.7608, ..., -1.9627, -1.3771, -1.5201], [-1.4182, -1.1056, -1.3893, ..., ... [-1.1565, -1.1560, -1.8686, ..., -1.5659, -1.1738, -1.2796]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype0-sub-e-v-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e', msg = 'sub' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.3774, 1.9364, 1.5854], [1.6931, 1.5305, 1.2546], [1.6619, 1.2000, 1.9785]]], ...5], [1.0922, 1.2489, 1.0642], [1.1694, 1.9373, 1.0951]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[-1.9911, -1.6350, -1.9326]], [[-1.9933, -1.7398, -1.9372]], [[-1.9157, -1.3498, -1.9232...0380, -1.6191, -1.2145]], [[-1.1348, -1.0058, -1.1735]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype0-sub-e-v-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e', msg = 'sub' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.6164, 1.0290, 1.6125], [1.0701, 1.7821, 1.1912], [1.5355, 1.0683, 1.8794]]], ...6], [1.3866, 1.8491, 1.6895], [1.5823, 1.1928, 1.5527]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[-1.1849, -1.4106, -1.0212]], [[-1.0388, -1.5373, -1.9555]], [[-1.6817, -1.9028, -1.3804...2894, -1.7692, -1.5882]], [[-1.5208, -1.0351, -1.6937]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype0-sub-e-v-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'sub' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.0990, 1.6970, 1.1433], [1.9439, 1.7318, 1.0281], [1.3959, 1.7948, 1.2402], [1.2035,... 1.9103], [1.5973, 1.7107, 1.9550], [1.9265, 1.2730, 1.7552]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.5213, -1.2444, -1.3199], [-1.7123, -1.9139, -1.6038], [-1.7356, -1.2189, -1.3561], ... [-1.4453, -1.2536, -1.7217], [-1.2138, -1.4201, -1.5267]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype0-sub-e-v-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'sub' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.9780, 1.0161, 1.3339], [1.3811, 1.4450, 1.2837], [1.7070, 1.6992, 1.2057], [1.2372,... 1.7804], [1.4705, 1.8394, 1.8979], [1.8011, 1.7762, 1.7827]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.2205, -1.1402, -1.7505], [-1.8027, -1.0023, -1.7252], [-1.6599, -1.5741, -1.3290], ... [-1.7672, -1.7581, -1.1136], [-1.5279, -1.2056, -1.8245]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([300, 3]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype0-sub-e-v-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'sub' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.4584], [1.8718], [1.9374], [1.8633], [1.1555], [1.1532], [1...776], [1.9149], [1.0426], [1.1254], [1.6656]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.6242], [-1.1407], [-1.2399], [-1.8214], [-1.2202], [-1.7181], ... [-1.6561], [-1.4180], [-1.6066], [-1.2577]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype0-sub-e-v-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'sub' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.0549], [1.7039], [1.5695], [1.5167], [1.1730], [1.7569], [1...028], [1.1599], [1.6418], [1.3746], [1.6912]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.7099], [-1.9367], [-1.2382], [-1.7006], [-1.6878], [-1.4353], ... [-1.7910], [-1.6263], [-1.4964], [-1.2293]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
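Given that failure mode, one compatible repair is to let the no-op fallback swallow keyword arguments so call sites can pass enabled=... unconditionally. This is a sketch of one possibility, not necessarily the change under test in this build:

from contextlib import contextmanager

@contextmanager
def empty_context(*args, **kwargs):
    # Accept and ignore everything, including enabled=..., so the fallback
    # has the same calling convention as torch.cuda.amp.autocast.
    yield

try:
    from torch.cuda.amp import autocast  # real AMP when available
except ImportError:
    autocast = empty_context             # harmless no-op otherwise

with autocast(enabled=False):            # no TypeError on either path
    pass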
The v_mul_u parametrizations then fail in exactly the same way:

_____________________ test_sddmm[idtype0-mul-u-v-shp0-g0] ______________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u', msg = 'mul', idtype = torch.int32
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]); SDDMM(message func: v_mul_u)
_____________________ test_sddmm[idtype0-mul-u-v-shp0-g1] ______________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u', msg = 'mul', idtype = torch.int32
lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]); SDDMM(message func: v_mul_u)
_____________________ test_sddmm[idtype0-mul-u-v-shp1-g0] ______________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u', msg = 'mul', idtype = torch.int32
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]); SDDMM(message func: v_mul_u)
_____________________ test_sddmm[idtype0-mul-u-v-shp1-g1] ______________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u', msg = 'mul', idtype = torch.int32
lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]); SDDMM(message func: v_mul_u)
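A readability note on the bracketed IDs: stacked parametrize decorators compose them bottom-up, so "mul-u-v" above really is msg='mul', rhs_target='u', lhs_target='v', exactly as the captured locals show. A small self-contained illustration (hypothetical test, not part of this suite):

import pytest

@pytest.mark.parametrize('g', ['g0', 'g1'])
@pytest.mark.parametrize('lhs_target', ['u', 'v'])
@pytest.mark.parametrize('rhs_target', ['u', 'v'])
@pytest.mark.parametrize('msg', ['add', 'mul'])
def test_id_order(g, lhs_target, rhs_target, msg):
    # Collected as e.g. test_id_order[mul-u-v-g1]: the bottom-most
    # decorator (msg) appears first in the ID, the top-most (g) last.
    pass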
_____________________ test_sddmm[idtype0-mul-u-v-shp2-g0] ______________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u', msg = 'mul', idtype = torch.int32
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]); SDDMM(message func: v_mul_u)
_____________________ test_sddmm[idtype0-mul-u-v-shp2-g1] ______________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u', msg = 'mul', idtype = torch.int32
lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]); SDDMM(message func: v_mul_u)
_____________________ test_sddmm[idtype0-mul-u-v-shp3-g0] ______________________
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'mul', idtype = torch.int32
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3]); SDDMM(message func: v_mul_u)
_____________________ test_sddmm[idtype0-mul-u-v-shp3-g1] ______________________
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'mul', idtype = torch.int32
(log truncated mid-test)
1.1360], [1.9507, 1.2104, 1.5330], [1.2131, 1.1469, 1.0591]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: v_mul_u) _____________________ test_sddmm[idtype0-mul-u-v-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.9901], [1.0099], [1.2603], [1.4604], [1.9943], [1.1959], [1...313], [1.0507], [1.9165], [1.3667], [1.2114]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7121], [1.7124], [1.5202], [1.3195], [1.8775], [1.3350], [1...968], [1.6054], [1.2468], [1.4611], [1.5100]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', 
rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: v_mul_u) _____________________ test_sddmm[idtype0-mul-u-v-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.8475], [1.7706], [1.0445], [1.7648], [1.7366], [1.7456], [1...912], [1.6841], [1.3253], [1.3462], [1.4826]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9941], [1.1150], [1.8879], [1.9967], [1.7072], [1.2826], [1...858], [1.2684], [1.6162], [1.7221], [1.2993]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: v_mul_u) _____________________ test_sddmm[idtype0-mul-u-e-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'mul', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.5797], [1.9666], [1.2427]]], [[[1.5013], [1.3110], ...0]]], [[[1.3627], [1.1619], [1.5314]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.8395]], [[1.4502]], [[1.1652]]]], [[[[1.8136]], [[1.450... [[[[1.2402]], [[1.8023]], [[1.1287]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_mul_u) _____________________ test_sddmm[idtype0-mul-u-e-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'mul', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.5343], [1.5562], [1.2622]]], [[[1.9095], [1.3036], ...0]]], [[[1.5853], [1.1440], [1.5747]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.4318]], [[1.6737]], [[1.6752]]]], [[[[1.3846]], [[1.864... [[[[1.8436]], [[1.5494]], [[1.7254]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_mul_u) _____________________ test_sddmm[idtype0-mul-u-e-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u' msg = 'mul', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.1007, 1.7470, 1.3938, ..., 1.6578, 1.2180, 1.1623]], [[1.8032, 1.5490, 1.1946, ..., 1.5221,... [[1.0997, 1.2849, 1.1970, ..., 1.9571, 1.6170, 1.6252]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.4485, 1.3094, 1.7124, ..., 1.9970, 1.4404, 1.2315], [1.5086, 1.7793, 1.3079, ..., 1.1855, 1... [1.6540, 1.6115, 1.0852, ..., 1.5910, 1.6503, 1.5416]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
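All of these tracebacks point at the same root cause, not at the SDDMM math:
in this build DGL's autocast name is bound to a zero-argument empty_context
fallback (used when torch.cuda.amp's autocast is unavailable, i.e. on older
PyTorch versions), so the newly added autocast(enabled=False) call raises
TypeError before any kernel runs. A minimal sketch of a signature-tolerant
fallback, hypothetical code rather than the exact patch in this PR:

    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):
        # Accept and ignore whatever the real torch.cuda.amp.autocast would
        # take (e.g. enabled=False) instead of raising TypeError.
        yield

    autocast = empty_context  # the fallback binding used on old PyTorch

    with autocast(enabled=False):  # now a no-op instead of a TypeError
        pass

Making the fallback signature-compatible like this, or guarding the
enabled=False call site by PyTorch version, would clear this whole batch of
failures at python/dgl/backend/pytorch/sparse.py:731.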
The failing pattern repeats verbatim for the u-e parametrizations:

test_sddmm[idtype0-mul-u-e-shp1-g0]  lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])  SDDMM(message func: e_mul_u)
test_sddmm[idtype0-mul-u-e-shp1-g1]  lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])  SDDMM(message func: e_mul_u)
test_sddmm[idtype0-mul-u-e-shp2-g0]  lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])  SDDMM(message func: e_mul_u)
test_sddmm[idtype0-mul-u-e-shp2-g1]  lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])  SDDMM(message func: e_mul_u)
test_sddmm[idtype0-mul-u-e-shp3-g0]  lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3])  SDDMM(message func: e_mul_u)
test_sddmm[idtype0-mul-u-e-shp3-g1]  lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([30, 3])  SDDMM(message func: e_mul_u)
test_sddmm[idtype0-mul-u-e-shp4-g0]  lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1])  SDDMM(message func: e_mul_u)
test_sddmm[idtype0-mul-u-e-shp4-g1]  lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1])  SDDMM(message func: e_mul_u)
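One more detail visible in every dump above: the gsddmm wrapper canonicalizes
operators before dispatch, rewriting 'sub' as 'add' on the negated right
operand and 'div' as 'mul' on its reciprocal, presumably so the backend only
needs add/mul kernels. A standalone illustration of that rewriting, with a
hypothetical helper name:

    import torch

    def canonicalize(op, rhs_data):
        # The same rewriting as in gsddmm: a - b == a + (-b) and
        # a / b == a * (1 / b), so only 'add' and 'mul' reach the backend.
        if op == 'sub':
            return 'add', -rhs_data
        if op == 'div':
            return 'mul', 1. / rhs_data
        return op, rhs_data

    print(canonicalize('div', torch.tensor([2.0, 4.0])))
    # ('mul', tensor([0.5000, 0.2500]))

The rewriting keeps the set of fused kernels small at the cost of one extra
elementwise negation or reciprocal on the dense operand.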
The same TypeError then recurs for the v-u parametrizations:

test_sddmm[idtype0-mul-v-u-shp0-g0]  lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])  SDDMM(message func: u_mul_v)
test_sddmm[idtype0-mul-v-u-shp0-g1]  lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1])  SDDMM(message func: u_mul_v)
test_sddmm[idtype0-mul-v-u-shp1-g0]  lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])  SDDMM(message func: u_mul_v)
test_sddmm[idtype0-mul-v-u-shp1-g1]  lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])  SDDMM(message func: u_mul_v)

_____________________ test_sddmm[idtype0-mul-v-u-shp2-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'mul'
idtype = torch.int32

(test body and call chain identical to the traceback above, failing inside gsddmm)

gidx =
op = 'mul'
lhs_data = tensor([[[[1.5903, 1.2385, 1.5796],
                     [1.2301, 1.0793, 1.2229],
                     [1.2572, 1.0428, 1.9580]]],
                   ...0],
                     [1.2781, 1.6750, 1.9855],
                     [1.6559, 1.5300, 1.9880]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[1.4013, 1.6375, 1.8113]],
                    [[1.9821, 1.3685, 1.8000]],
                    [[1.9631, 1.2024, 1.0593]], ...
[[1.8099, 1.8005, 1.2011]], [[1.6635, 1.0638, 1.2064]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype0-mul-v-u-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.6890, 1.2641, 1.2064], [1.2670, 1.8662, 1.7952], [1.7735, 1.8103, 1.5439]]], ...2], [1.2924, 1.1844, 1.6015], [1.6630, 1.3261, 1.6926]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.4580, 1.8963, 1.5111]], [[1.7040, 1.4660, 1.9452]], [[1.6927, 1.0419, 1.5889]], ... 
[[1.9356, 1.5489, 1.9919]], [[1.8441, 1.6354, 1.4700]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype0-mul-v-u-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.7364, 1.2589, 1.9642], [1.8319, 1.9675, 1.2334], [1.6157, 1.4322, 1.7812], [1.7950,... 1.1609], [1.2212, 1.2352, 1.8276], [1.7630, 1.7445, 1.7538]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7712, 1.4441, 1.8348], [1.3232, 1.7858, 1.2215], [1.9160, 1.9674, 1.9596], [1.5363,... 
1.9783], [1.3758, 1.9904, 1.8596], [1.3909, 1.5570, 1.3388]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype0-mul-v-u-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.6979, 1.4592, 1.5575], [1.6044, 1.4044, 1.0300], [1.5838, 1.9546, 1.1346], [1.0080,... 1.6506], [1.5091, 1.7109, 1.5651], [1.7867, 1.3418, 1.1336]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3482, 1.2118, 1.2790], [1.2772, 1.1153, 1.0483], [1.8384, 1.9193, 1.8559], [1.2240,... 
1.5710], [1.6515, 1.3912, 1.1337], [1.0881, 1.2699, 1.2054]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype0-mul-v-u-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.9980], [1.4273], [1.3783], [1.3429], [1.9074], [1.2742], [1...482], [1.1656], [1.3613], [1.3674], [1.3873]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9406], [1.3865], [1.6577], [1.2894], [1.9294], [1.2442], [1...214], [1.8093], [1.7464], [1.4702], [1.3710]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', 
rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype0-mul-v-u-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.4056], [1.6843], [1.9831], [1.0975], [1.9192], [1.6647], [1...045], [1.0570], [1.6374], [1.6481], [1.0612]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8677], [1.0517], [1.4482], [1.2381], [1.2830], [1.0219], [1...837], [1.6794], [1.2546], [1.6241], [1.1607]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype0-mul-v-e-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'mul', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.3748], [1.3817], [1.4792]]], [[[1.0104], [1.6704], ...2]]], [[[1.9434], [1.1472], [1.8164]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.8079]], [[1.9936]], [[1.9247]]]], [[[[1.7477]], [[1.009... [[[[1.4565]], [[1.9245]], [[1.2575]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_mul_v) _____________________ test_sddmm[idtype0-mul-v-e-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'mul', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.6458], [1.0392], [1.0429]]], [[[1.1165], [1.5710], ...0]]], [[[1.3844], [1.8812], [1.1960]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.8562]], [[1.2964]], [[1.0601]]]], [[[[1.9892]], [[1.258... [[[[1.9132]], [[1.0616]], [[1.8392]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]) SDDMM(message func: e_mul_v) _____________________ test_sddmm[idtype0-mul-v-e-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'mul', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.3702, 1.9488, 1.3191, ..., 1.1618, 1.6973, 1.0026]], [[1.0481, 1.8692, 1.4743, ..., 1.4111,... [[1.2945, 1.5375, 1.8787, ..., 1.9438, 1.4265, 1.7877]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.3718, 1.1680, 1.4834, ..., 1.5842, 1.7346, 1.1188], [1.8824, 1.9586, 1.5324, ..., 1.0645, 1... [1.0798, 1.8629, 1.2088, ..., 1.6136, 1.8836, 1.0767]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_mul_v) _____________________ test_sddmm[idtype0-mul-v-e-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'mul', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.3218, 1.4833, 1.5011, ..., 1.6949, 1.7228, 1.8635]], [[1.5348, 1.1614, 1.3238, ..., 1.5270,... [[1.7260, 1.4691, 1.1485, ..., 1.4666, 1.9524, 1.5599]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.1004, 1.0616, 1.8746, ..., 1.9884, 1.4734, 1.1603], [1.9122, 1.1881, 1.5319, ..., 1.1466, 1... 
[1.3074, 1.7341, 1.1171, ..., 1.0870, 1.8793, 1.5882]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]) SDDMM(message func: e_mul_v) _____________________ test_sddmm[idtype0-mul-v-e-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.1938, 1.6845, 1.1217], [1.0258, 1.0165, 1.1324], [1.0129, 1.8475, 1.2167]]], ...0], [1.3707, 1.1277, 1.1724], [1.0434, 1.1374, 1.5299]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.7053, 1.5624, 1.2460]], [[1.2740, 1.5178, 1.4410]], [[1.7574, 1.7797, 1.7229]], ... 
[[1.9836, 1.0942, 1.2160]], [[1.3473, 1.5360, 1.8952]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_mul_v) _____________________ test_sddmm[idtype0-mul-v-e-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.3425, 1.8598, 1.3484], [1.9765, 1.3926, 1.4796], [1.8511, 1.1295, 1.1848]]], ...5], [1.7091, 1.5219, 1.1467], [1.1310, 1.7235, 1.5321]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.7308, 1.8088, 1.8945]], [[1.8694, 1.1523, 1.4650]], [[1.1219, 1.5921, 1.5905]], ... 
[[1.5459, 1.5700, 1.4220]], [[1.7689, 1.5129, 1.1816]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]) SDDMM(message func: e_mul_v) _____________________ test_sddmm[idtype0-mul-v-e-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.5467, 1.5286, 1.7264], [1.1726, 1.0548, 1.3566], [1.2517, 1.5119, 1.5066], [1.7340,... 1.7654], [1.1969, 1.1769, 1.5616], [1.5520, 1.5859, 1.8023]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6189, 1.9072, 1.9266], [1.1187, 1.5503, 1.0251], [1.7442, 1.4871, 1.6692], [1.6484,... 
1.8881], [1.2789, 1.2769, 1.0588], [1.3638, 1.7591, 1.2655]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_mul_v) _____________________ test_sddmm[idtype0-mul-v-e-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.5829, 1.5634, 1.5521], [1.5847, 1.6746, 1.6888], [1.6371, 1.5277, 1.0083], [1.4660,... 1.6998], [1.6988, 1.8791, 1.8534], [1.6827, 1.1968, 1.0039]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3961, 1.4057, 1.3251], [1.7312, 1.5299, 1.3832], [1.1578, 1.7748, 1.4104], [1.5558,... 
1.7955], [1.3854, 1.1648, 1.6940], [1.5721, 1.1498, 1.0138]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3]) SDDMM(message func: e_mul_v) _____________________ test_sddmm[idtype0-mul-v-e-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.9298], [1.3953], [1.7581], [1.9799], [1.9749], [1.6717], [1...590], [1.1925], [1.4553], [1.8178], [1.6407]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7276], [1.0655], [1.4373], [1.6895], [1.3256], [1.1902], [1...248], [1.1730], [1.0325], [1.4388], [1.9845]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', 
rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_mul_v) _____________________ test_sddmm[idtype0-mul-v-e-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.5520], [1.7171], [1.7093], [1.1534], [1.4518], [1.0282], [1...948], [1.4260], [1.9233], [1.5457], [1.2305]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.4120], [1.8860], [1.9619], [1.0533], [1.2793], [1.5232], [1...721], [1.5878], [1.9918], [1.0132], [1.9564]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
_____________________ test_sddmm[idtype0-mul-e-u-shp0-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e'
msg = 'mul', idtype = torch.int32

gidx =
op = 'mul'
lhs_data = tensor([[[[[[1.4664], [1.6914], [1.5437]]], [[[1.7265], [1.1967], ...5]]], [[[1.8934], [1.3102], [1.8538]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.4904]], [[1.8725]], [[1.5990]]]], [[[[1.1443]], [[1.811... [[[[1.7009]], [[1.1514]], [[1.9840]]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'

>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1])
SDDMM(message func: u_mul_e)
_____________________ test_sddmm[idtype0-mul-e-u-shp0-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e'
msg = 'mul', idtype = torch.int32

gidx =
op = 'mul'
lhs_data = tensor([[[[[[1.1144], [1.0343], [1.2415]]], [[[1.5507], [1.2021], ...7]]], [[[1.6033], [1.6170], [1.4110]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.8241]], [[1.6830]], [[1.9180]]]], [[[[1.5085]], [[1.597... [[[[1.1959]], [[1.1387]], [[1.4174]]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'

>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1])
SDDMM(message func: u_mul_e)
_____________________ test_sddmm[idtype0-mul-e-u-shp1-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)})
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e'
msg = 'mul', idtype = torch.int32

gidx =
op = 'mul'
lhs_data = tensor([[[[[1.2755, 1.4904, 1.4674, ..., 1.0711, 1.2805, 1.3667]], [[1.2688, 1.3233, 1.9057, ..., 1.3667,... [[1.9994, 1.4350, 1.6658, ..., 1.9135, 1.8815, 1.7668]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[1.5162, 1.0792, 1.5115, ..., 1.6078, 1.6591, 1.7418], [1.0835, 1.8042, 1.5951, ..., 1.6102, 1... [1.0492, 1.9652, 1.5004, ..., 1.2636, 1.9799, 1.8994]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'

>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7])
SDDMM(message func: u_mul_e)
_____________________ test_sddmm[idtype0-mul-e-u-shp1-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e'
msg = 'mul', idtype = torch.int32

gidx =
op = 'mul'
lhs_data = tensor([[[[[1.9607, 1.6323, 1.5792, ..., 1.8773, 1.9909, 1.0221]], [[1.9169, 1.5060, 1.2363, ..., 1.0604,... [[1.3287, 1.0196, 1.1083, ..., 1.5206, 1.9129, 1.9595]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[1.9614, 1.9484, 1.5308, ..., 1.0263, 1.6798, 1.1752], [1.3265, 1.7974, 1.2480, ..., 1.2264, 1... [1.8018, 1.3024, 1.2725, ..., 1.3863, 1.5773, 1.6542]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'

>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7])
SDDMM(message func: u_mul_e)
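The gsddmm wrapper at the bottom of each traceback canonicalizes the operator before dispatch: 'sub' becomes 'add' with a negated right operand, and 'div' becomes 'mul' with a reciprocal, so the backend only needs kernels for the two commutative ops. The same rewriting, restated as a runnable snippet:

import torch

# Standalone restatement of the operand rewriting visible in the traceback
# (python/dgl/backend/pytorch/sparse.py): 'sub' and 'div' are folded into
# the rhs operand so that only 'add' and 'mul' kernels are required.
def canonicalize(op, rhs_data):
    if op == 'sub':
        op = 'add'
        rhs_data = -rhs_data
    if op == 'div':
        op = 'mul'
        rhs_data = 1. / rhs_data
    return op, rhs_data

op, rhs = canonicalize('div', torch.tensor([2.0, 4.0]))
assert op == 'mul'
assert torch.allclose(rhs, torch.tensor([0.5, 0.25]))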
_____________________ test_sddmm[idtype0-mul-e-u-shp2-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)})
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e', msg = 'mul'
idtype = torch.int32

gidx =
op = 'mul'
lhs_data = tensor([[[[1.8002, 1.2539, 1.9046], [1.3710, 1.6473, 1.1905], [1.3224, 1.1616, 1.8274]]], ...2], [1.3756, 1.0690, 1.4796], [1.6007, 1.0588, 1.6470]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[1.5640, 1.1620, 1.5567]], [[1.4942, 1.3134, 1.6760]], [[1.7928, 1.1505, 1.8410]], ... [[1.3293, 1.5288, 1.8205]], [[1.4055, 1.2419, 1.0280]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'

>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3])
SDDMM(message func: u_mul_e)
_____________________ test_sddmm[idtype0-mul-e-u-shp2-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e', msg = 'mul'
idtype = torch.int32

gidx =
op = 'mul'
lhs_data = tensor([[[[1.3004, 1.1327, 1.1910], [1.0949, 1.5986, 1.8204], [1.8070, 1.4255, 1.1531]]], ...6], [1.8762, 1.6867, 1.5626], [1.9683, 1.4182, 1.1769]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[1.1579, 1.1016, 1.6830]], [[1.6926, 1.8125, 1.5481]], [[1.1133, 1.7029, 1.4074]], ... [[1.0099, 1.1582, 1.8603]], [[1.4307, 1.7713, 1.5930]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'

>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3])
SDDMM(message func: u_mul_e)
_____________________ test_sddmm[idtype0-mul-e-u-shp3-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)})
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'mul'
idtype = torch.int32

gidx =
op = 'mul'
lhs_data = tensor([[1.3724, 1.4591, 1.3614], [1.2558, 1.0979, 1.9516], [1.9603, 1.0510, 1.1388], [1.3860,... 1.8649], [1.3237, 1.0581, 1.6371], [1.3278, 1.9360, 1.3043]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.4358, 1.0279, 1.6155], [1.1395, 1.6929, 1.0278], [1.2639, 1.8530, 1.8346], [1.7112,... 1.5788], [1.6616, 1.5229, 1.1072], [1.5474, 1.9247, 1.6150]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'

>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3])
SDDMM(message func: u_mul_e)
_____________________ test_sddmm[idtype0-mul-e-u-shp3-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'mul'
idtype = torch.int32

gidx =
op = 'mul'
lhs_data = tensor([[1.5201, 1.5445, 1.6694], [1.0258, 1.2446, 1.9489], [1.4834, 1.7409, 1.0631], [1.3784,... 1.9114], [1.3358, 1.3556, 1.3065], [1.2282, 1.9135, 1.0827]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.9937, 1.5702, 1.7991], [1.8722, 1.6941, 1.3649], [1.9039, 1.7871, 1.2303], [1.8948,... 1.1412], [1.3887, 1.1521, 1.7998], [1.5805, 1.9042, 1.4065]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'

>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([300, 3])
SDDMM(message func: u_mul_e)
_____________________ test_sddmm[idtype0-mul-e-u-shp4-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'mul'
idtype = torch.int32

gidx =
op = 'mul'
lhs_data = tensor([[1.1642], [1.2375], [1.5035], [1.0389], [1.8760], [1.2762], [1...997], [1.4861], [1.1064], [1.1989], [1.1036]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.6195], [1.7053], [1.7031], [1.3672], [1.8734], [1.1097], [1...467], [1.0339], [1.9913], [1.5524], [1.1420]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'

>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1])
SDDMM(message func: u_mul_e)
_____________________ test_sddmm[idtype0-mul-e-u-shp4-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'mul'
idtype = torch.int32

gidx =
op = 'mul'
lhs_data = tensor([[1.0768], [1.0236], [1.0092], [1.7489], [1.5153], [1.1569], [1...862], [1.7320], [1.6302], [1.4124], [1.3658]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.2593], [1.4204], [1.6841], [1.1386], [1.5207], [1.7619], [1...137], [1.8948], [1.0163], [1.8506], [1.4189]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'

>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([300, 1])
SDDMM(message func: u_mul_e)
_____________________ test_sddmm[idtype0-mul-e-v-shp0-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e'
msg = 'mul', idtype = torch.int32

gidx =
op = 'mul'
lhs_data = tensor([[[[[[1.1658], [1.2879], [1.3308]]], [[[1.8009], [1.5493], ...3]]], [[[1.5743], [1.8128], [1.1094]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.2792]], [[1.4643]], [[1.9769]]]], [[[[1.7496]], [[1.514... [[[[1.5876]], [[1.4089]], [[1.2501]]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'v', rhs_target = 'e'

>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1])
SDDMM(message func: v_mul_e)
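Every failure in this run is the identical TypeError from python/dgl/backend/pytorch/sparse.py:731: the code calls autocast(enabled=False), but autocast evidently resolves here to a no-op empty_context() stand-in that accepts no keyword arguments. A minimal reproduction of that mismatch (the shape of the fallback is an assumption; only the resulting error text is taken from this log):

from contextlib import contextmanager

# Assumed shape of the fallback: a no-op context manager substituted for
# torch's autocast when the real one is unavailable. Note it takes
# positional args only -- no **kwargs.
@contextmanager
def empty_context(*args):
    yield

autocast = empty_context

with autocast(enabled=False):   # raises TypeError: empty_context() got an
    pass                        # unexpected keyword argument 'enabled'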
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype0-mul-e-v-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'mul', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.3075], [1.9083], [1.9296]]], [[[1.0925], [1.8390], ...4]]], [[[1.5433], [1.8838], [1.3052]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.7958]], [[1.2582]], [[1.8026]]]], [[[[1.7004]], [[1.165... [[[[1.3630]], [[1.6917]], [[1.6379]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype0-mul-e-v-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'mul', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.4085, 1.0010, 1.5031, ..., 1.4730, 1.7961, 1.6322]], [[1.4783, 1.5227, 1.9458, ..., 1.5199,... [[1.5619, 1.1602, 1.3188, ..., 1.7716, 1.7537, 1.9728]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.2902, 1.9714, 1.4188, ..., 1.2555, 1.6444, 1.7722], [1.9236, 1.5605, 1.7273, ..., 1.9219, 1... [1.0559, 1.1506, 1.8783, ..., 1.5147, 1.1438, 1.1013]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype0-mul-e-v-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'mul', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.2531, 1.2894, 1.9953, ..., 1.6765, 1.1281, 1.5220]], [[1.8461, 1.2460, 1.2754, ..., 1.4522,... [[1.6731, 1.3153, 1.3616, ..., 1.5880, 1.8054, 1.0122]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.3276, 1.4905, 1.1411, ..., 1.6558, 1.5075, 1.2057], [1.4234, 1.8357, 1.7990, ..., 1.7897, 1... 
[1.8371, 1.7149, 1.0030, ..., 1.5398, 1.2794, 1.8114]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype0-mul-e-v-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.7589, 1.6840, 1.0621], [1.7250, 1.6501, 1.1323], [1.4652, 1.1460, 1.2079]]], ...1], [1.5370, 1.8680, 1.5698], [1.3432, 1.2846, 1.6770]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.2712, 1.9779, 1.2866]], [[1.0847, 1.0979, 1.1385]], [[1.9986, 1.6884, 1.6886]], ... 
[[1.5847, 1.3770, 1.0644]], [[1.3734, 1.8261, 1.9138]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype0-mul-e-v-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.3089, 1.8696, 1.6580], [1.0443, 1.9462, 1.2462], [1.4845, 1.4021, 1.7593]]], ...8], [1.2344, 1.3851, 1.8836], [1.6786, 1.1050, 1.1435]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.6970, 1.1049, 1.7136]], [[1.2967, 1.0001, 1.5127]], [[1.3305, 1.8323, 1.7352]], ... 
[[1.8162, 1.0407, 1.6928]], [[1.4028, 1.7819, 1.0586]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype0-mul-e-v-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'mul' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.2224, 1.0345, 1.7816], [1.6028, 1.2182, 1.8710], [1.0095, 1.4133, 1.4941], [1.9115,... 1.0833], [1.1394, 1.7798, 1.9864], [1.2735, 1.0530, 1.9912]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6021, 1.0316, 1.1274], [1.5554, 1.1671, 1.0547], [1.5038, 1.9284, 1.1104], [1.5078,... 
[The remaining test_sddmm failures are condensed below: each repeats the
identical test body and traceback verbatim and fails with the same
TypeError: empty_context() got an unexpected keyword argument 'enabled'
at python/dgl/backend/pytorch/sparse.py:731. Only the distinguishing
parameters and the captured feature shapes differ, where
g0 = Graph(num_nodes=30, num_edges=100) and
g1 = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300},
metagraph=[('_U', '_V', '_E')]).]

test_sddmm[idtype0-mul-e-v-shp3-g0]  v_mul_e  g0  lhs torch.Size([30, 3]),  rhs torch.Size([100, 3])
test_sddmm[idtype0-mul-e-v-shp3-g1]  v_mul_e  g1  lhs torch.Size([40, 3]),  rhs torch.Size([300, 3])
test_sddmm[idtype0-mul-e-v-shp4-g0]  v_mul_e  g0  lhs torch.Size([30, 1]),  rhs torch.Size([100, 1])
test_sddmm[idtype0-mul-e-v-shp4-g1]  v_mul_e  g1  lhs torch.Size([40, 1]),  rhs torch.Size([300, 1])
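For reference, the v_mul_e message function exercised above multiplies each
edge's destination-node feature with the edge feature under trailing-dimension
broadcasting, which is where shape pairs such as torch.Size([30, 1, 3, 3]) and
torch.Size([100, 4, 1, 3]) come from. A simplified plain-PyTorch sketch of the
intended semantics (the real fused kernel avoids materializing dst_feat[dst]):

    import torch

    num_nodes, num_edges = 30, 100
    dst = torch.randint(num_nodes, (num_edges,))  # destination node per edge

    dst_feat = torch.rand(num_nodes, 1, 3, 3)     # lhs: one row per node
    edge_feat = torch.rand(num_edges, 4, 1, 3)    # rhs: one row per edge

    out = dst_feat[dst] * edge_feat               # v_mul_e
    print(out.shape)                              # torch.Size([100, 4, 3, 3])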
test_sddmm[idtype0-div-u-v-shp0-g0]  v_div_u  g0  lhs torch.Size([30, 1, 2, 1, 3, 1]),  rhs torch.Size([30, 4, 1, 3, 1, 1])
test_sddmm[idtype0-div-u-v-shp0-g1]  v_div_u  g1  lhs torch.Size([40, 1, 2, 1, 3, 1]),  rhs torch.Size([30, 4, 1, 3, 1, 1])
test_sddmm[idtype0-div-u-v-shp1-g0]  v_div_u  g0  lhs torch.Size([30, 5, 3, 1, 7]),     rhs torch.Size([30, 1, 3, 7, 7])
test_sddmm[idtype0-div-u-v-shp1-g1]  v_div_u  g1  lhs torch.Size([40, 5, 3, 1, 7]),     rhs torch.Size([30, 1, 3, 7, 7])
test_sddmm[idtype0-div-u-v-shp2-g0]  v_div_u  g0  lhs torch.Size([30, 1, 3, 3]),        rhs torch.Size([30, 4, 1, 3])
test_sddmm[idtype0-div-u-v-shp2-g1]  v_div_u  g1  lhs torch.Size([40, 1, 3, 3]),        rhs torch.Size([30, 4, 1, 3])
test_sddmm[idtype0-div-u-v-shp3-g0]  v_div_u  g0  lhs torch.Size([30, 3]),              rhs torch.Size([30, 3])
test_sddmm[idtype0-div-u-v-shp3-g1]  v_div_u  g1  lhs torch.Size([40, 3]),              rhs torch.Size([30, 3])
test_sddmm[idtype0-div-u-v-shp4-g0]  v_div_u  g0  lhs torch.Size([30, 1]),              rhs torch.Size([30, 1])
test_sddmm[idtype0-div-u-v-shp4-g1]  v_div_u  g1  lhs torch.Size([40, 1]),              rhs torch.Size([30, 1])
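Note that the gsddmm wrapper shown in the tracebacks never dispatches a 'sub'
or 'div' kernel: it rewrites them to 'add' and 'mul' by negating or taking the
reciprocal of the right-hand operand first. That is why every 'div' failure
reports op = 'mul', and why its rhs_data values all lie in (0.5, 1.0]
(reciprocals of features drawn from [1, 2)). The rewrite in isolation:

    import torch

    def rewrite(op, rhs):
        if op == 'sub':           # a - b  ==  a + (-b)
            return 'add', -rhs
        if op == 'div':           # a / b  ==  a * (1 / b)
            return 'mul', 1. / rhs
        return op, rhs

    op, rhs = rewrite('div', torch.tensor([1.25, 2.0, 4.0]))
    print(op, rhs)                # mul tensor([0.8000, 0.5000, 0.2500])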
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: v_div_u) _____________________ test_sddmm[idtype0-div-u-e-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'div', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.3762], [1.5574], [1.0369]]], [[[1.3514], [1.4346], ...4]]], [[[1.4585], [1.3787], [1.1078]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.5401]], [[0.8938]], [[0.6074]]]], [[[[0.9024]], [[0.544... [[[[0.5276]], [[0.9780]], [[0.6039]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_div_u) _____________________ test_sddmm[idtype0-div-u-e-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'div', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.0216], [1.4480], [1.2476]]], [[[1.4477], [1.7414], ...4]]], [[[1.9746], [1.5940], [1.8647]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.6363]], [[0.9044]], [[0.6203]]]], [[[[0.5992]], [[0.601... [[[[0.7952]], [[0.7904]], [[0.6823]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_div_u) _____________________ test_sddmm[idtype0-div-u-e-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u' msg = 'div', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.5019, 1.3927, 1.9067, ..., 1.2266, 1.6784, 1.1655]], [[1.0651, 1.1120, 1.9446, ..., 1.7489,... [[1.2487, 1.6271, 1.5927, ..., 1.5852, 1.5059, 1.4011]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[0.5185, 0.5625, 0.7030, ..., 0.6489, 0.8936, 0.9950], [0.5459, 0.6319, 0.9114, ..., 0.7848, 0... [0.6733, 0.6172, 0.9113, ..., 0.7580, 0.7124, 0.8746]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The next 19 parametrizations of test_sddmm fail identically (same test body,
same TypeError: empty_context() got an unexpected keyword argument 'enabled'
raised at python/dgl/backend/pytorch/sparse.py:731); only the test id, the
SDDMM message function, and the operand shapes differ:

test_sddmm[idtype0-div-u-e-shp1-g1]  e_div_u  lhs torch.Size([300, 5, 3, 1, 7])     rhs torch.Size([30, 1, 3, 7, 7])
test_sddmm[idtype0-div-u-e-shp2-g0]  e_div_u  lhs torch.Size([100, 1, 3, 3])        rhs torch.Size([30, 4, 1, 3])
test_sddmm[idtype0-div-u-e-shp2-g1]  e_div_u  lhs torch.Size([300, 1, 3, 3])        rhs torch.Size([30, 4, 1, 3])
test_sddmm[idtype0-div-u-e-shp3-g0]  e_div_u  lhs torch.Size([100, 3])              rhs torch.Size([30, 3])
test_sddmm[idtype0-div-u-e-shp3-g1]  e_div_u  lhs torch.Size([300, 3])              rhs torch.Size([30, 3])
test_sddmm[idtype0-div-u-e-shp4-g0]  e_div_u  lhs torch.Size([100, 1])              rhs torch.Size([30, 1])
test_sddmm[idtype0-div-u-e-shp4-g1]  e_div_u  lhs torch.Size([300, 1])              rhs torch.Size([30, 1])
test_sddmm[idtype0-div-v-u-shp0-g0]  u_div_v  lhs torch.Size([30, 1, 2, 1, 3, 1])   rhs torch.Size([30, 4, 1, 3, 1, 1])
test_sddmm[idtype0-div-v-u-shp0-g1]  u_div_v  lhs torch.Size([30, 1, 2, 1, 3, 1])   rhs torch.Size([40, 4, 1, 3, 1, 1])
test_sddmm[idtype0-div-v-u-shp1-g0]  u_div_v  lhs torch.Size([30, 5, 3, 1, 7])      rhs torch.Size([30, 1, 3, 7, 7])
test_sddmm[idtype0-div-v-u-shp1-g1]  u_div_v  lhs torch.Size([30, 5, 3, 1, 7])      rhs torch.Size([40, 1, 3, 7, 7])
test_sddmm[idtype0-div-v-u-shp2-g0]  u_div_v  lhs torch.Size([30, 1, 3, 3])         rhs torch.Size([30, 4, 1, 3])
test_sddmm[idtype0-div-v-u-shp2-g1]  u_div_v  lhs torch.Size([30, 1, 3, 3])         rhs torch.Size([40, 4, 1, 3])
test_sddmm[idtype0-div-v-u-shp3-g0]  u_div_v  lhs torch.Size([30, 3])               rhs torch.Size([30, 3])
test_sddmm[idtype0-div-v-u-shp3-g1]  u_div_v  lhs torch.Size([30, 3])               rhs torch.Size([40, 3])
test_sddmm[idtype0-div-v-u-shp4-g0]  u_div_v  lhs torch.Size([30, 1])               rhs torch.Size([30, 1])
test_sddmm[idtype0-div-v-u-shp4-g1]  u_div_v  lhs torch.Size([30, 1])               rhs torch.Size([40, 1])
test_sddmm[idtype0-div-v-e-shp0-g0]  e_div_v  lhs torch.Size([100, 1, 2, 1, 3, 1])  rhs torch.Size([30, 4, 1, 3, 1, 1])
test_sddmm[idtype0-div-v-e-shp0-g1]  e_div_v  lhs torch.Size([300, 1, 2, 1, 3, 1])  rhs torch.Size([40, 4, 1, 3, 1, 1])

(g0 is the homogeneous graph with 30 nodes / 100 edges; g1 is the heterogeneous
graph with 30 '_U' nodes, 40 '_V' nodes, and 300 edges.)
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype0-div-v-e-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'div', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.7659, 1.7164, 1.9858, ..., 1.4154, 1.8837, 1.0904]], [[1.0740, 1.4350, 1.5604, ..., 1.6351,... [[1.1507, 1.2047, 1.7526, ..., 1.5836, 1.3597, 1.7299]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[0.6831, 0.7068, 0.8253, ..., 0.6304, 0.5234, 0.6293], [0.5021, 0.5775, 0.7580, ..., 0.5969, 0... [0.5793, 0.8495, 0.6622, ..., 0.5722, 0.5267, 0.7271]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype0-div-v-e-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'div', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.6653, 1.3021, 1.5754, ..., 1.6931, 1.5621, 1.8680]], [[1.6887, 1.1986, 1.2692, ..., 1.6482,... [[1.4100, 1.8557, 1.8667, ..., 1.6440, 1.4655, 1.1896]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[0.7525, 0.9506, 0.8584, ..., 0.5325, 0.7431, 0.6060], [0.5889, 0.9232, 0.8268, ..., 0.5011, 0... [0.7712, 0.8617, 0.7013, ..., 0.5929, 0.5483, 0.5154]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype0-div-v-e-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'div' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.7397, 1.0739, 1.7391], [1.7849, 1.4351, 1.7063], [1.4272, 1.9307, 1.5587]]], ...7], [1.1692, 1.8509, 1.2351], [1.9321, 1.1859, 1.6659]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[0.7143, 0.7792, 0.7648]], [[0.5008, 0.6617, 0.7071]], [[0.5709, 0.5683, 0.5215]], ...[[0.5232, 0.9002, 0.6652]], [[0.6246, 0.8847, 0.9173]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype0-div-v-e-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'div' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.7885, 1.1409, 1.7997], [1.8075, 1.1268, 1.5178], [1.3090, 1.1804, 1.7897]]], ...1], [1.1748, 1.0072, 1.1882], [1.7232, 1.7574, 1.8170]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[0.9799, 0.7367, 0.5104]], [[0.8863, 0.6030, 0.5758]], [[0.5237, 0.5927, 0.6795]], ...[[0.9040, 0.5456, 0.6813]], [[0.7692, 0.8078, 0.6068]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype0-div-v-e-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'div' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.0972, 1.5571, 1.4325], [1.8840, 1.5881, 1.7085], [1.0623, 1.8818, 1.4494], [1.2662,... 1.7415], [1.6961, 1.1335, 1.8053], [1.2424, 1.0903, 1.4787]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.7345, 0.6697, 0.7467], [0.7335, 0.7964, 0.6312], [0.6986, 0.8320, 0.5171], [0.7699,...673], [0.5534, 0.5604, 0.6779], [0.6414, 0.8302, 0.6151]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype0-div-v-e-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'div' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.2308, 1.2708, 1.7950], [1.0666, 1.9308, 1.8350], [1.8648, 1.0786, 1.2477], [1.4555,... 1.9389], [1.3623, 1.2261, 1.4412], [1.1856, 1.2648, 1.9553]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.5794, 0.5872, 0.6625], [0.8219, 0.5103, 0.7505], [0.5330, 0.8525, 0.7770], [0.5034,...356], [0.6047, 0.5008, 0.5238], [0.8515, 0.6995, 0.7585]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype0-div-v-e-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'div' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.2722], [1.6575], [1.1107], [1.4778], [1.8017], [1.8190], [1...473], [1.7082], [1.4082], [1.1997], [1.4178]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.6307], [0.5626], [0.9464], [0.6598], [0.9720], [0.6039], [0..., [0.9013], [0.7623], [0.6689], [0.6388]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype0-div-v-e-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'div' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.5059], [1.9850], [1.8420], [1.9422], [1.4088], [1.6924], [1...652], [1.6762], [1.7371], [1.3864], [1.9007]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.6646], [0.8538], [0.7749], [0.5928], [0.5449], [0.6743], [0..., [0.7296], [0.5079], [0.5033], [0.5919]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
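Every one of these failures is the same crash: python/dgl/backend/pytorch/sparse.py:731 executes
"with autocast(enabled=False):", but on this build the name autocast resolves to a fallback
called empty_context that accepts no arguments, hence the TypeError. That reading matches the
commit under test ("fix for pytorch < 1.12"). Below is a minimal sketch of the failure mode and
of one keyword-tolerant fallback; the names autocast and empty_context come from the traceback,
while the binding and empty_context_compat are assumptions for illustration only, not
necessarily the PR's actual fix:

from contextlib import contextmanager

# No-op stand-in, as implied by the traceback: it takes no arguments,
# unlike torch.cuda.amp.autocast.
@contextmanager
def empty_context():
    yield

autocast = empty_context  # assumed wiring on the older-PyTorch code path

try:
    with autocast(enabled=False):  # call site written for the real autocast
        pass
except TypeError as err:
    print(err)  # empty_context() got an unexpected keyword argument 'enabled'

# A keyword-tolerant variant: swallowing autocast-style arguments lets the
# same call site run as a harmless no-op on builds without amp support.
@contextmanager
def empty_context_compat(*args, **kwargs):
    yield

with empty_context_compat(enabled=False):
    pass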
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype0-div-e-u-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e' msg = 'div', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.6206], [1.8776], [1.0995]]], [[[1.2913], [1.9432], ...9]]], [[[1.5954], [1.0246], [1.2421]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.5347]], [[0.6582]], [[0.7037]]]], [[[[0.5457]], [[0.936... [[[[0.5350]], [[0.7860]], [[0.5306]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype0-div-e-u-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e' msg = 'div', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.3870], [1.9628], [1.8338]]], [[[1.5869], [1.2319], ...6]]], [[[1.3799], [1.7796], [1.4120]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.9419]], [[0.7452]], [[0.6260]]]], [[[[0.7923]], [[0.783... [[[[0.8148]], [[0.7065]], [[0.5465]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype0-div-e-u-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e' msg = 'div', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.6726, 1.2244, 1.9223, ..., 1.2894, 1.3351, 1.1244]], [[1.5337, 1.9583, 1.7329, ..., 1.3652,... [[1.4669, 1.3121, 1.4975, ..., 1.7890, 1.0135, 1.1988]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[0.8912, 0.5686, 0.9188, ..., 0.5845, 0.7493, 0.5541], [0.8410, 0.8880, 0.5166, ..., 0.9294, 0... [0.9067, 0.5436, 0.6268, ..., 0.5448, 0.6572, 0.7631]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype0-div-e-u-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e' msg = 'div', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.2700, 1.6312, 1.7687, ..., 1.4082, 1.6589, 1.8668]], [[1.7440, 1.3188, 1.9603, ..., 1.7350,... [[1.1321, 1.2129, 1.5962, ..., 1.0399, 1.3332, 1.9615]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[0.7367, 0.5115, 0.5947, ..., 0.5184, 0.5641, 0.8891], [0.6241, 0.7131, 0.5841, ..., 0.5486, 0... [0.5342, 0.6197, 0.7269, ..., 0.5177, 0.6437, 0.6298]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype0-div-e-u-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e', msg = 'div' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.6071, 1.4537, 1.1727], [1.1467, 1.2386, 1.3501], [1.8026, 1.8286, 1.5050]]], ...7], [1.2569, 1.9382, 1.3645], [1.0923, 1.4607, 1.8795]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[0.9701, 0.6393, 0.7174]], [[0.6485, 0.8209, 0.5691]], [[0.5969, 0.7840, 0.6058]], ...[[0.5749, 0.5963, 0.5713]], [[0.9082, 0.5799, 0.7885]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype0-div-e-u-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e', msg = 'div' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.5415, 1.1223, 1.4392], [1.8694, 1.6635, 1.6717], [1.3310, 1.6013, 1.4946]]], ...3], [1.6461, 1.3887, 1.8944], [1.1987, 1.0023, 1.3954]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[0.7118, 0.5675, 0.7694]], [[0.8795, 0.9166, 0.6337]], [[0.5088, 0.5661, 0.7242]], ...[[0.8291, 0.7136, 0.6220]], [[0.7039, 0.5710, 0.5498]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype0-div-e-u-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'div' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.6260, 1.4132, 1.2763], [1.5482, 1.3077, 1.1432], [1.0900, 1.4776, 1.3477], [1.4207,... 1.1349], [1.8939, 1.6285, 1.0954], [1.7755, 1.9335, 1.5141]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.8567, 0.5250, 0.5352], [0.8538, 0.9636, 0.6001], [0.6660, 0.6220, 0.6609], [0.5910,...040], [0.6915, 0.5684, 0.8010], [0.5657, 0.5285, 0.5073]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype0-div-e-u-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'div' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.5862, 1.1097, 1.8181], [1.8607, 1.8071, 1.7281], [1.6437, 1.4726, 1.5934], [1.8774,... 1.8782], [1.3578, 1.2334, 1.1907], [1.5748, 1.4363, 1.1877]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.7169, 0.5854, 0.7248], [0.5016, 0.6372, 0.9046], [0.6954, 0.7691, 0.9356], [0.5954,...704], [0.6248, 0.6162, 0.6571], [0.7472, 0.7193, 0.6151]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The remaining u_div_e and v_div_e parametrizations fail at the same point, python/dgl/backend/pytorch/sparse.py:731, with the same error (TypeError: empty_context() got an unexpected keyword argument 'enabled'); only the graph and the feature shapes differ:

test_sddmm[idtype0-div-e-u-shp3-g1]  u_div_e  lhs torch.Size([30, 3]), rhs torch.Size([300, 3])
test_sddmm[idtype0-div-e-u-shp4-g0]  u_div_e  lhs torch.Size([30, 1]), rhs torch.Size([100, 1])
test_sddmm[idtype0-div-e-u-shp4-g1]  u_div_e  lhs torch.Size([30, 1]), rhs torch.Size([300, 1])
test_sddmm[idtype0-div-e-v-shp0-g0]  v_div_e  lhs torch.Size([30, 1, 2, 1, 3, 1]), rhs torch.Size([100, 4, 1, 3, 1, 1])
test_sddmm[idtype0-div-e-v-shp0-g1]  v_div_e  lhs torch.Size([40, 1, 2, 1, 3, 1]), rhs torch.Size([300, 4, 1, 3, 1, 1])
test_sddmm[idtype0-div-e-v-shp1-g0]  v_div_e  lhs torch.Size([30, 5, 3, 1, 7]), rhs torch.Size([100, 1, 3, 7, 7])
test_sddmm[idtype0-div-e-v-shp1-g1]  v_div_e  lhs torch.Size([40, 5, 3, 1, 7]), rhs torch.Size([300, 1, 3, 7, 7])
test_sddmm[idtype0-div-e-v-shp2-g0]  v_div_e  lhs torch.Size([30, 1, 3, 3]), rhs torch.Size([100, 4, 1, 3])
test_sddmm[idtype0-div-e-v-shp2-g1]  v_div_e  lhs torch.Size([40, 1, 3, 3]), rhs torch.Size([300, 4, 1, 3])
test_sddmm[idtype0-div-e-v-shp3-g0]  v_div_e  lhs torch.Size([30, 3]), rhs torch.Size([100, 3])
test_sddmm[idtype0-div-e-v-shp3-g1]  v_div_e  lhs torch.Size([40, 3]), rhs torch.Size([300, 3])
test_sddmm[idtype0-div-e-v-shp4-g0]  v_div_e  lhs torch.Size([30, 1]), rhs torch.Size([100, 1])
test_sddmm[idtype0-div-e-v-shp4-g1]  v_div_e  lhs torch.Size([40, 1]), rhs torch.Size([300, 1])
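The gsddmm source shown in these tracebacks rewrites 'sub' into 'add' on a negated right-hand side and 'div' into 'mul' on a reciprocal, so the backend kernels only ever see the add/mul forms; this is also why op appears as 'mul' in the locals of the div tests above. The identities it relies on are easy to check with plain torch (standalone snippet, not DGL code):

    import torch

    lhs = torch.rand(100, 3, dtype=torch.float64) + 1
    rhs = torch.rand(100, 3, dtype=torch.float64) + 1

    # div as mul-by-reciprocal and sub as add-of-negation, the rewrites
    # gsddmm applies before dispatching to the backend.
    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))
    assert torch.allclose(lhs - rhs, lhs + (-rhs))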
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([300, 1]) SDDMM(message func: v_div_e) _____________________ test_sddmm[idtype0-dot-u-v-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u' msg = 'dot', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[[1.7278], [1.1985], [1.4547]]], [[[1.1806], [1.9264], ...2]]], [[[1.7886], [1.4008], [1.3732]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.3155]], [[1.1620]], [[1.8878]]]], [[[[1.9444]], [[1.459... [[[[1.9426]], [[1.5915]], [[1.5745]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: v_dot_u) _____________________ test_sddmm[idtype0-dot-u-v-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u' msg = 'dot', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[[1.8450], [1.9472], [1.7472]]], [[[1.0065], [1.5989], ...2]]], [[[1.5534], [1.3135], [1.5327]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.5235]], [[1.7083]], [[1.2252]]]], [[[[1.4583]], [[1.368... [[[[1.9812]], [[1.7117]], [[1.7927]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: v_dot_u) _____________________ test_sddmm[idtype0-dot-u-v-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u' msg = 'dot', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[1.3269, 1.9457, 1.6463, ..., 1.1097, 1.8729, 1.4492]], [[1.7134, 1.6167, 1.2678, ..., 1.1323,... [[1.9678, 1.7023, 1.3024, ..., 1.3135, 1.4632, 1.6250]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.6288, 1.7727, 1.0406, ..., 1.7703, 1.9869, 1.7428], [1.9916, 1.7811, 1.7144, ..., 1.7314, 1... [1.0223, 1.5172, 1.2116, ..., 1.5314, 1.9098, 1.6673]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: v_dot_u) _____________________ test_sddmm[idtype0-dot-u-v-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u' msg = 'dot', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[1.7104, 1.0629, 1.4039, ..., 1.6261, 1.9966, 1.0484]], [[1.1519, 1.9346, 1.9526, ..., 1.0984,... [[1.7235, 1.6870, 1.7802, ..., 1.6384, 1.5729, 1.7144]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.9308, 1.3973, 1.7323, ..., 1.5114, 1.1363, 1.0586], [1.0815, 1.1904, 1.0445, ..., 1.9064, 1... 
[1.4518, 1.7539, 1.6333, ..., 1.1671, 1.0651, 1.6037]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: v_dot_u) _____________________ test_sddmm[idtype0-dot-u-v-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u', msg = 'dot' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[1.9980, 1.9698, 1.4218], [1.3878, 1.5999, 1.7772], [1.9821, 1.2891, 1.2214]]], ...0], [1.8265, 1.6840, 1.0647], [1.6200, 1.0238, 1.2511]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.0800, 1.2335, 1.5837]], [[1.6338, 1.1110, 1.5362]], [[1.5167, 1.9957, 1.8914]], ... 
The remaining dot-u-v parametrizations fail with byte-for-byte the same traceback: pytest re-dumps the identical test body and gsddmm source each time, ending at

    python/dgl/backend/pytorch/sparse.py:731: TypeError
    E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

Only the randomly generated tensor values and the captured shapes differ per test:

    test_sddmm[idtype0-dot-u-v-shp2-g0]  SDDMM(v_dot_u)  lhs [30, 1, 3, 3]  rhs [30, 4, 1, 3]
    test_sddmm[idtype0-dot-u-v-shp2-g1]  SDDMM(v_dot_u)  lhs [40, 1, 3, 3]  rhs [30, 4, 1, 3]
    test_sddmm[idtype0-dot-u-v-shp3-g0]  SDDMM(v_dot_u)  lhs [30, 3]        rhs [30, 3]
    test_sddmm[idtype0-dot-u-v-shp3-g1]  SDDMM(v_dot_u)  lhs [40, 3]        rhs [30, 3]
    test_sddmm[idtype0-dot-u-v-shp4-g0]  SDDMM(v_dot_u)  lhs [30, 1]        rhs [30, 1]
    test_sddmm[idtype0-dot-u-v-shp4-g1]  SDDMM(v_dot_u)  lhs [40, 1]        rhs [30, 1]

(g0 is the homogeneous graph with 30 nodes / 100 edges; g1 is the heterograph with 30 '_U' nodes, 40 '_V' nodes and 300 edges.)
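Note: the gsddmm wrapper dumped in each traceback rewrites 'sub' and 'div' before dispatch so that the backend kernels only ever see 'add' and 'mul'. The rewrite is plain algebra and easy to sanity-check in isolation (a standalone check, independent of DGL):

    import torch

    a = torch.rand(5, dtype=torch.float64) + 1
    b = torch.rand(5, dtype=torch.float64) + 1

    # 'sub' becomes 'add' with a negated right-hand side ...
    assert torch.allclose(a - b, a + (-b))
    # ... and 'div' becomes 'mul' with the reciprocal, exactly as in gsddmm.
    assert torch.allclose(a / b, a * (1. / b))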
The dot-u-e parametrizations (lhs_target='e', rhs_target='u') fail the same way, with the edge count as the leading lhs dimension:

    test_sddmm[idtype0-dot-u-e-shp0-g0]  SDDMM(e_dot_u)  lhs [100, 1, 2, 1, 3, 1]  rhs [30, 4, 1, 3, 1, 1]
    test_sddmm[idtype0-dot-u-e-shp0-g1]  SDDMM(e_dot_u)  lhs [300, 1, 2, 1, 3, 1]  rhs [30, 4, 1, 3, 1, 1]
    test_sddmm[idtype0-dot-u-e-shp1-g0]  SDDMM(e_dot_u)  lhs [100, 5, 3, 1, 7]     rhs [30, 1, 3, 7, 7]
    test_sddmm[idtype0-dot-u-e-shp1-g1]  SDDMM(e_dot_u)  lhs [300, 5, 3, 1, 7]     rhs [30, 1, 3, 7, 7]
    test_sddmm[idtype0-dot-u-e-shp2-g0]  SDDMM(e_dot_u)  lhs [100, 1, 3, 3]        rhs [30, 4, 1, 3]
    test_sddmm[idtype0-dot-u-e-shp2-g1]  SDDMM(e_dot_u)  lhs [300, 1, 3, 3]        rhs [30, 4, 1, 3]
    test_sddmm[idtype0-dot-u-e-shp3-g0]  SDDMM(e_dot_u)  lhs [100, 3]              rhs [30, 3]
    test_sddmm[idtype0-dot-u-e-shp3-g1]  SDDMM(e_dot_u)  lhs [300, 3]              rhs [30, 3]
    test_sddmm[idtype0-dot-u-e-shp4-g0]  SDDMM(e_dot_u)  lhs [100, 1]              rhs [30, 1]
    test_sddmm[idtype0-dot-u-e-shp4-g1]  SDDMM(e_dot_u)  lhs [300, 1]              rhs [30, 1]
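Note: the failing line itself follows PyTorch's standard AMP recipe for ops that need a fixed precision: normalize input dtypes while autocast is still active (_cast_if_autocast_enabled), then run the kernel under autocast(enabled=False) so no further implicit casts happen inside. A self-contained sketch of that pattern; the helper name and dtype choices are illustrative, not DGL's actual implementation, and torch.is_autocast_enabled / torch.get_autocast_gpu_dtype only exist on newer PyTorch, which is why a fallback like empty_context is needed at all:

    import torch

    def cast_if_autocast_enabled(*tensors):
        # While autocast is active, bring every floating-point input to the
        # autocast dtype so the op below sees consistent types.
        if torch.is_autocast_enabled():
            dtype = torch.get_autocast_gpu_dtype()
            return tuple(t.to(dtype) if t.is_floating_point() else t
                         for t in tensors)
        return tensors

    def fused_op(a, b):
        a, b = cast_if_autocast_enabled(a, b)
        # Disable autocast for the kernel itself, mirroring the failing
        # `with autocast(enabled=False):` in python/dgl/backend/pytorch/sparse.py.
        with torch.cuda.amp.autocast(enabled=False):
            return a @ b  # stands in for the SDDMM kernel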
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_dot_u) _____________________ test_sddmm[idtype0-dot-v-u-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'dot', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[[1.8109], [1.3848], [1.3433]]], [[[1.1955], [1.7171], ...4]]], [[[1.2934], [1.1054], [1.7330]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.1829]], [[1.0825]], [[1.1472]]]], [[[[1.4529]], [[1.543... [[[[1.2161]], [[1.4200]], [[1.4233]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: u_dot_v) _____________________ test_sddmm[idtype0-dot-v-u-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'dot', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[[1.0744], [1.6940], [1.5025]]], [[[1.6765], [1.0223], ...0]]], [[[1.0804], [1.5469], [1.4333]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2273]], [[1.0022]], [[1.0556]]]], [[[[1.5493]], [[1.694... [[[[1.8804]], [[1.8516]], [[1.6190]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
_____________________ test_sddmm[idtype0-dot-v-u-shp1-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v'
msg = 'dot', idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[[[[1.0196, 1.5999, 1.5410, ...]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[1.0574, 1.1435, 1.7739, ...]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: u_dot_v)
_____________________ test_sddmm[idtype0-dot-v-u-shp1-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v'
msg = 'dot', idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[[[[1.7391, 1.9368, 1.5089, ...]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[1.8110, 1.2446, 1.7583, ...]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])
SDDMM(message func: u_dot_v)
_____________________ test_sddmm[idtype0-dot-v-u-shp2-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'dot'
idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[[[1.5451, 1.1690, 1.6552], ...]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[1.9011, 1.8685, 1.7323]], ...]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: u_dot_v)
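The shape pairs the test prints, e.g. lhs torch.Size([30, 5, 3, 1, 7]) against rhs torch.Size([40, 1, 3, 7, 7]) in the shp1-g1 case above, exercise NumPy-style broadcasting over the feature dimensions, with 'dot' reducing the trailing dimension. A sketch of the per-edge result shape, under the assumption that the 300 per-edge operands have already been gathered from the endpoint features:

    import numpy as np

    lhs = np.random.rand(300, 5, 3, 1, 7)  # u features, one row per edge
    rhs = np.random.rand(300, 1, 3, 7, 7)  # v features, one row per edge
    out = (lhs * rhs).sum(-1)              # u_dot_v: broadcast, then reduce last dim
    print(out.shape)                       # (300, 5, 3, 7)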
_____________________ test_sddmm[idtype0-dot-v-u-shp2-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'dot'
idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[[[1.3306, 1.0925, 1.3724], ...]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[1.3954, 1.9743, 1.3307]], ...]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3])
SDDMM(message func: u_dot_v)
_____________________ test_sddmm[idtype0-dot-v-u-shp3-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'dot'
idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[1.5862, 1.0177, 1.0326], ...], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.5624, 1.5971, 1.2074], ...], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: u_dot_v)
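The select(...) helper the test source (shown in the first traceback above) uses to pick node/edge counts and feature frames is not reproduced in this log; a hypothetical reconstruction consistent with its call sites, where the positional arguments are always ordered source, edge, destination:

    def select(target, src, edge, dst):
        # Dispatch on the SDDMM operand target: 'u' -> source nodes,
        # 'e' -> edges, 'v' -> destination nodes.
        return {'u': src, 'e': edge, 'v': dst}[target]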
_____________________ test_sddmm[idtype0-dot-v-u-shp3-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'dot'
idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[1.2632, 1.7337, 1.4518], ...], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.2517, 1.2217, 1.1005], ...], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3])
SDDMM(message func: u_dot_v)
_____________________ test_sddmm[idtype0-dot-v-u-shp4-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'dot'
idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[1.0286], [1.9090], ...], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.3884], [1.3421], ...], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: u_dot_v)
_____________________ test_sddmm[idtype0-dot-v-u-shp4-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'dot'
idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[1.7271], [1.3178], ...], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.4248], [1.0926], ...], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1])
SDDMM(message func: u_dot_v)
_____________________ test_sddmm[idtype0-dot-v-e-shp0-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v'
msg = 'dot', idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[[[[[1.9488], ...]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.3428]], ...]]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_dot_v)
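A reading aid for the failure ids: in test_sddmm[idtype0-dot-v-e-shp0-g0] just above, the captured parameters are lhs_target = 'e' and rhs_target = 'v', so the id segments run as [idtype]-[msg]-[rhs_target]-[lhs_target]-[shp]-[g], the reverse of the decorator order in the source. This appears to be ordinary pytest behavior for stacked parametrize marks, as a tiny standalone example suggests:

    import pytest

    @pytest.mark.parametrize('outer', ['o'])
    @pytest.mark.parametrize('inner', ['i'])
    def test_id_order(outer, inner):
        pass  # collected as test_id_order[i-o]: the innermost mark comes first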
_____________________ test_sddmm[idtype0-dot-v-e-shp0-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v'
msg = 'dot', idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[[[[[1.2498], ...]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.0263]], ...]]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1])
SDDMM(message func: e_dot_v)
_____________________ test_sddmm[idtype0-dot-v-e-shp1-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v'
msg = 'dot', idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[[[[1.7327, 1.9326, 1.5172, ...]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[1.7594, 1.8808, 1.2798, ...]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: e_dot_v)
_____________________ test_sddmm[idtype0-dot-v-e-shp1-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v'
msg = 'dot', idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[[[[1.0037, 1.3297, 1.7971, ...]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[1.7431, 1.2575, 1.3621, ...]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])
SDDMM(message func: e_dot_v)
_____________________ test_sddmm[idtype0-dot-v-e-shp2-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'dot'
idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[[[1.1999, 1.4421, 1.3385], ...]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[1.1038, 1.4274, 1.5728]], ...]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: e_dot_v)
_____________________ test_sddmm[idtype0-dot-v-e-shp2-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'dot'
idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[[[1.1193, 1.7977, 1.7612], ...]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[1.1456, 1.4849, 1.2408]], ...]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3])
SDDMM(message func: e_dot_v)
_____________________ test_sddmm[idtype0-dot-v-e-shp3-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'dot'
idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[1.0948, 1.3034, 1.1468], ...], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.8235, 1.6558, 1.8373], ...], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: e_dot_v)
_____________________ test_sddmm[idtype0-dot-v-e-shp3-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'dot'
idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[1.9848, 1.6372, 1.8036], ...], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.7553, 1.7868, 1.2566], ...], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3])
SDDMM(message func: e_dot_v)
_____________________ test_sddmm[idtype0-dot-v-e-shp4-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'dot'
idtype = torch.int32

    with F.record_grad():
>       e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'dot'
lhs_data = tensor([[1.3833], [1.8132], ...], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.4136], [1.9394], ...], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'

    args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>   with autocast(enabled=False):
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int32
lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: e_dot_v)
rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_dot_v) _____________________ test_sddmm[idtype0-dot-v-e-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'dot' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[1.4366], [1.5863], [1.0533], [1.0964], [1.6898], [1.1214], [1...559], [1.9026], [1.0101], [1.5271], [1.7303]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8719], [1.8686], [1.0219], [1.0043], [1.0426], [1.8554], [1...961], [1.3028], [1.3177], [1.2702], [1.4461]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: e_dot_v) _____________________ test_sddmm[idtype0-dot-e-u-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e' msg = 'dot', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[[1.3497], [1.0358], [1.3673]]], [[[1.0300], [1.3829], ...5]]], [[[1.6346], [1.6897], [1.3109]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.8032]], [[1.2017]], [[1.7373]]]], [[[[1.6433]], [[1.833... [[[[1.3125]], [[1.4887]], [[1.0799]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
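Each of these failures reduces to the same call-site mismatch: the frame at
python/dgl/backend/pytorch/sparse.py:731 enters "with autocast(enabled=False):",
but on this worker the name autocast apparently resolves to a no-argument
fallback context manager. A minimal sketch of that failure mode, with
empty_context written from scratch here as a stand-in (DGL's actual fallback
definition may differ):

    # Stand-in for the no-op context manager that seems to be substituted
    # for torch's autocast on builds without a compatible AMP API.
    from contextlib import contextmanager

    @contextmanager
    def empty_context():
        # Accepts no arguments -- the source of the TypeError above.
        yield

    autocast = empty_context  # what the old-PyTorch code path amounts to

    try:
        with autocast(enabled=False):
            pass
    except TypeError as err:
        print(err)  # empty_context() got an unexpected keyword argument 'enabled'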
u_dot_e cases (SDDMM(message func: u_dot_e)), same TypeError at
python/dgl/backend/pytorch/sparse.py:731:

test_sddmm[idtype0-dot-e-u-shp0-g0]   lhs torch.Size([30, 1, 2, 1, 3, 1])   rhs torch.Size([100, 4, 1, 3, 1, 1])
test_sddmm[idtype0-dot-e-u-shp0-g1]   lhs torch.Size([30, 1, 2, 1, 3, 1])   rhs torch.Size([300, 4, 1, 3, 1, 1])
test_sddmm[idtype0-dot-e-u-shp1-g0]   lhs torch.Size([30, 5, 3, 1, 7])      rhs torch.Size([100, 1, 3, 7, 7])
test_sddmm[idtype0-dot-e-u-shp1-g1]   lhs torch.Size([30, 5, 3, 1, 7])      rhs torch.Size([300, 1, 3, 7, 7])
test_sddmm[idtype0-dot-e-u-shp2-g0]   lhs torch.Size([30, 1, 3, 3])         rhs torch.Size([100, 4, 1, 3])
test_sddmm[idtype0-dot-e-u-shp2-g1]   lhs torch.Size([30, 1, 3, 3])         rhs torch.Size([300, 4, 1, 3])
test_sddmm[idtype0-dot-e-u-shp3-g0]   lhs torch.Size([30, 3])               rhs torch.Size([100, 3])
test_sddmm[idtype0-dot-e-u-shp3-g1]   lhs torch.Size([30, 3])               rhs torch.Size([300, 3])
test_sddmm[idtype0-dot-e-u-shp4-g0]   lhs torch.Size([30, 1])               rhs torch.Size([100, 1])
test_sddmm[idtype0-dot-e-u-shp4-g1]   lhs torch.Size([30, 1])               rhs torch.Size([300, 1])
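Since every traceback ends in the PyTorch-version compatibility layer of
python/dgl/backend/pytorch/sparse.py rather than in the tests themselves, one
way to make such a fallback tolerant is sketched below. This is an assumption
about the shape of a fix, not a copy of the change actually made in this PR:

    # Hedged sketch: let the no-op context accept and ignore any arguments,
    # so a single call site "with autocast(enabled=False):" works on every
    # supported PyTorch build. The version gate DGL really uses may differ.
    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):
        yield

    try:
        from torch.cuda.amp import autocast  # real AMP context where available
    except ImportError:
        autocast = empty_context             # no-op shim elsewhere

With the (*args, **kwargs) signature, the enabled=False keyword observed in
the tracebacks is accepted and ignored instead of raising a TypeError.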
v_dot_e cases (SDDMM(message func: v_dot_e)), same TypeError at
python/dgl/backend/pytorch/sparse.py:731:

test_sddmm[idtype0-dot-e-v-shp0-g0]   lhs torch.Size([30, 1, 2, 1, 3, 1])   rhs torch.Size([100, 4, 1, 3, 1, 1])
test_sddmm[idtype0-dot-e-v-shp0-g1]   lhs torch.Size([40, 1, 2, 1, 3, 1])   rhs torch.Size([300, 4, 1, 3, 1, 1])
test_sddmm[idtype0-dot-e-v-shp1-g0]   lhs torch.Size([30, 5, 3, 1, 7])      rhs torch.Size([100, 1, 3, 7, 7])
test_sddmm[idtype0-dot-e-v-shp1-g1]   lhs torch.Size([40, 5, 3, 1, 7])      rhs torch.Size([300, 1, 3, 7, 7])
test_sddmm[idtype0-dot-e-v-shp2-g0]   lhs torch.Size([30, 1, 3, 3])         rhs torch.Size([100, 4, 1, 3])
test_sddmm[idtype0-dot-e-v-shp2-g1]   lhs torch.Size([40, 1, 3, 3])         rhs torch.Size([300, 4, 1, 3])
test_sddmm[idtype0-dot-e-v-shp3-g0]   lhs torch.Size([30, 3])               rhs torch.Size([100, 3])
_____________________ test_sddmm[idtype0-dot-e-v-shp3-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'dot'
idtype = torch.int32
op = 'dot'
lhs_data = tensor([[1.0952, 1.5003, 1.1581], [1.0287, 1.6598, 1.8738], [1.9734, 1.4945, 1.7509], [1.3548,... 1.2096], [1.9662, 1.1699, 1.6117], [1.7480, 1.6916, 1.3036]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.8872, 1.5368, 1.1589], [1.2342, 1.1644, 1.7984], [1.0335, 1.5830, 1.2710], [1.7462,...
1.8702], [1.6280, 1.3304, 1.2558], [1.9104, 1.0512, 1.7903]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([300, 3]) SDDMM(message func: v_dot_e) _____________________ test_sddmm[idtype0-dot-e-v-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'dot' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[1.1832], [1.6929], [1.0749], [1.6664], [1.6261], [1.3676], [1...853], [1.8530], [1.1264], [1.2744], [1.4285]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1722], [1.2304], [1.2738], [1.7734], [1.9383], [1.8808], [1...309], [1.9389], [1.8951], [1.3045], [1.9704]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', 
rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1]) SDDMM(message func: v_dot_e) _____________________ test_sddmm[idtype0-dot-e-v-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'dot' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[1.0956], [1.2342], [1.1932], [1.9459], [1.9533], [1.4235], [1...210], [1.4134], [1.9273], [1.4785], [1.4462]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5075], [1.2227], [1.1115], [1.9942], [1.3835], [1.4978], [1...511], [1.5345], [1.0993], [1.2742], [1.9286]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
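Every failure in this batch is the same one-line crash, independent of graph, shapes, and operator: on the PyTorch build under test, autocast has been swapped for a no-argument fallback named empty_context, and the call site "with autocast(enabled=False):" then raises. Below is a minimal sketch of a keyword-tolerant fallback, assuming only what the traceback shows (the name empty_context and the enabled=False call); it is an illustration, not DGL's actual implementation:

    from contextlib import contextmanager

    # Minimal sketch, not DGL's actual code: a stand-in for autocast on
    # PyTorch versions that lack it. Accepting *args/**kwargs lets call
    # sites written against the autocast signature, such as
    # autocast(enabled=False), execute unchanged as a no-op.
    @contextmanager
    def empty_context(*args, **kwargs):
        yield

With a stub like this, "with autocast(enabled=False):" degrades to a no-op on old PyTorch instead of raising the TypeError repeated throughout this run.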
___________________ test_sddmm[idtype0-copy_lhs-u-v-shp0-g0] ___________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ...)
torch.int32
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype0-copy_lhs-u-v-shp0-g1] ___________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype0-copy_lhs-u-v-shp1-g0] ___________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ...)
torch.int32
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype0-copy_lhs-u-v-shp1-g1] ___________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype0-copy_lhs-u-v-shp2-g0] ___________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ...)
torch.int32
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype0-copy_lhs-u-v-shp2-g1] ___________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype0-copy_lhs-u-v-shp3-g0] ___________________
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ...)
torch.int32
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype0-copy_lhs-u-v-shp3-g1] ___________________
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype0-copy_lhs-u-v-shp4-g0] ___________________
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ...)
torch.int32
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype0-copy_lhs-u-v-shp4-g1] ___________________
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int32
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int32
lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: v_copy_lhs_u)
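For context on the failing line itself: the pattern visible in the traceback casts the operands once while autocast is active, then turns autocast off around the custom kernel so the kernel is not re-cast internally. The following is a rough reconstruction of that pattern under PyTorch 1.10 or newer; _cast_if_autocast_enabled and run_kernel here are illustrative stand-ins, not DGL's verbatim source:

    import torch

    def _cast_if_autocast_enabled(*args):
        # Illustrative helper: with autocast active, cast floating-point
        # tensors to the autocast dtype once, up front; pass everything
        # else (graph index, op strings, targets) through untouched.
        if not torch.is_autocast_enabled():
            return args
        dtype = torch.get_autocast_gpu_dtype()
        return tuple(
            a.to(dtype) if torch.is_tensor(a) and a.is_floating_point() else a
            for a in args)

    def run_kernel(kernel, *inputs):
        args = _cast_if_autocast_enabled(*inputs)
        # Disable autocast around the custom op so it computes at the
        # dtypes chosen above. This is the autocast(enabled=False) call
        # that breaks when autocast is a keyword-less stub.
        with torch.cuda.amp.autocast(enabled=False):
            return kernel(*args)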
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: v_copy_lhs_u) ___________________ test_sddmm[idtype0-copy_lhs-u-e-shp0-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[[1.9682], [1.6224], [1.4288]]], [[[1.5988], [1.8274], ...6]]], [[[1.6448], [1.9657], [1.1459]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.7639]], [[1.4653]], [[1.9670]]]], [[[[1.2659]], [[1.471... [[[[1.9569]], [[1.2241]], [[1.2063]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_copy_lhs_u) ___________________ test_sddmm[idtype0-copy_lhs-u-e-shp0-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[[1.9123], [1.7268], [1.0714]]], [[[1.7865], [1.1983], ...0]]], [[[1.6869], [1.2344], [1.6618]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.5713]], [[1.6621]], [[1.3917]]]], [[[[1.2631]], [[1.988... [[[[1.8247]], [[1.0518]], [[1.9517]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_copy_lhs_u) ___________________ test_sddmm[idtype0-copy_lhs-u-e-shp1-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[1.4102, 1.8487, 1.3722, ..., 1.8695, 1.6304, 1.1701]], [[1.5535, 1.4272, 1.6620, ..., 1.1921,... [[1.1499, 1.2955, 1.4665, ..., 1.3395, 1.0404, 1.8818]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.7767, 1.8110, 1.6131, ..., 1.4587, 1.5498, 1.5281], [1.4960, 1.3019, 1.0330, ..., 1.9285, 1... [1.1037, 1.9946, 1.8857, ..., 1.2944, 1.8166, 1.0929]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The remaining e_copy_lhs_u parametrizations (test body as in the full example above, message func e_copy_lhs_u) fail at the same python/dgl/backend/pytorch/sparse.py:731 with the identical TypeError: empty_context() got an unexpected keyword argument 'enabled'; only the graph and feature shapes differ:

___________________ test_sddmm[idtype0-copy_lhs-u-e-shp1-g0] ___________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u'
lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
___________________ test_sddmm[idtype0-copy_lhs-u-e-shp1-g1] ___________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u'
lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
___________________ test_sddmm[idtype0-copy_lhs-u-e-shp2-g0] ___________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u'
lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
___________________ test_sddmm[idtype0-copy_lhs-u-e-shp2-g1] ___________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u'
lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
___________________ test_sddmm[idtype0-copy_lhs-u-e-shp3-g0] ___________________
shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u'
lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3])
___________________ test_sddmm[idtype0-copy_lhs-u-e-shp3-g1] ___________________
shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u'
lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([30, 3])
___________________ test_sddmm[idtype0-copy_lhs-u-e-shp4-g0] ___________________
shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u'
lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1])
___________________ test_sddmm[idtype0-copy_lhs-u-e-shp4-g1] ___________________
shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u'
lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1])
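The failing call path can be exercised without pytest. A hypothetical minimal reproduction, assuming a PyTorch build older than 1.12 and using only the public DGL APIs visible in the traceback (dgl.ops.gsddmm); on unaffected builds this simply runs:

    # Hypothetical repro sketch of the e_copy_lhs_u / shp3 case above.
    # dgl.rand_graph and dgl.ops.gsddmm are public APIs; the shapes mirror
    # the shp3 parametrization ((3,), (3,)) on the 30-node/100-edge graph.
    import dgl
    import torch

    g = dgl.rand_graph(30, 100)                    # 30 nodes, 100 edges
    lhs = torch.rand(100, 3, dtype=torch.float64)  # one feature row per edge
    rhs = torch.rand(30, 3, dtype=torch.float64)   # one feature row per node
    # copy_lhs with lhs_target='e' reaches the autocast guard in the PyTorch
    # backend; on affected builds this raises the TypeError seen above.
    e = dgl.ops.gsddmm(g, 'copy_lhs', lhs, rhs, lhs_target='e', rhs_target='u')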
The u_copy_lhs_v parametrizations (lhs on source nodes, rhs on destination nodes, message func u_copy_lhs_v) fail with the same traceback:

___________________ test_sddmm[idtype0-copy_lhs-v-u-shp0-g0] ___________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v'
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
___________________ test_sddmm[idtype0-copy_lhs-v-u-shp0-g1] ___________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v'
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1])
___________________ test_sddmm[idtype0-copy_lhs-v-u-shp1-g0] ___________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v'
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
___________________ test_sddmm[idtype0-copy_lhs-v-u-shp1-g1] ___________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v'
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])
___________________ test_sddmm[idtype0-copy_lhs-v-u-shp2-g0] ___________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v'
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
___________________ test_sddmm[idtype0-copy_lhs-v-u-shp2-g1] ___________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v'
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3])
___________________ test_sddmm[idtype0-copy_lhs-v-u-shp3-g0] ___________________
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v'
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3])
___________________ test_sddmm[idtype0-copy_lhs-v-u-shp3-g1] ___________________
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v'
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3])
___________________ test_sddmm[idtype0-copy_lhs-v-u-shp4-g0] ___________________
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v'
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1])
___________________ test_sddmm[idtype0-copy_lhs-v-u-shp4-g1] ___________________
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v'
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1])
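For context on why the failing guard exists at all: judging purely from the call site, _cast_if_autocast_enabled pre-casts the inputs once, and the kernel then runs under autocast(enabled=False) so its internals stay in a single dtype. A speculative sketch of that pattern, inferred from the call site alone and not DGL's actual helper:

    # Speculative sketch; the real _cast_if_autocast_enabled in DGL may differ.
    import torch

    def _cast_if_autocast_enabled(*args):
        # Outside an autocast region, pass everything through unchanged.
        if not torch.is_autocast_enabled():
            return args
        # Inside one, cast floating-point tensors to the active autocast
        # dtype up front, so the op can then disable autocast internally.
        dtype = torch.get_autocast_gpu_dtype()  # e.g. torch.float16
        return tuple(
            a.to(dtype) if torch.is_tensor(a) and a.is_floating_point() else a
            for a in args
        )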
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: u_copy_lhs_v) ___________________ test_sddmm[idtype0-copy_lhs-v-e-shp0-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[[1.4934], [1.8750], [1.1428]]], [[[1.0847], [1.6281], ...3]]], [[[1.0370], [1.4299], [1.8161]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.3847]], [[1.9502]], [[1.3862]]]], [[[[1.3226]], [[1.967... [[[[1.0026]], [[1.1400]], [[1.4025]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype0-copy_lhs-v-e-shp0-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[[1.4032], [1.5536], [1.1636]]], [[[1.7309], [1.3589], ...2]]], [[[1.3282], [1.6093], [1.6524]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.0027]], [[1.7724]], [[1.5652]]]], [[[[1.4435]], [[1.519... [[[[1.0300]], [[1.8676]], [[1.1499]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype0-copy_lhs-v-e-shp1-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[1.2908, 1.2382, 1.5887, ..., 1.5564, 1.4113, 1.7163]], [[1.5149, 1.0488, 1.6798, ..., 1.5238,... [[1.7193, 1.2198, 1.5572, ..., 1.5195, 1.9197, 1.8070]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.8127, 1.2324, 1.6161, ..., 1.1316, 1.9000, 1.9988], [1.1104, 1.9191, 1.4277, ..., 1.5859, 1... [1.0821, 1.7470, 1.3086, ..., 1.8793, 1.2015, 1.1289]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype0-copy_lhs-v-e-shp1-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[1.8500, 1.7437, 1.6374, ..., 1.1963, 1.3857, 1.1868]], [[1.8597, 1.2737, 1.9956, ..., 1.8287,... [[1.3530, 1.9746, 1.0798, ..., 1.5055, 1.0626, 1.7793]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.4532, 1.1247, 1.9847, ..., 1.4455, 1.0533, 1.4875], [1.0787, 1.3570, 1.1898, ..., 1.4821, 1... 
[1.1944, 1.3382, 1.9883, ..., 1.7304, 1.5516, 1.5562]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype0-copy_lhs-v-e-shp2-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[1.3318, 1.7373, 1.1638], [1.7744, 1.7960, 1.4251], [1.5133, 1.1480, 1.6928]]], ...6], [1.4074, 1.0011, 1.2896], [1.8300, 1.9250, 1.4728]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.7218, 1.3230, 1.0578]], [[1.0663, 1.4559, 1.9180]], [[1.4005, 1.6701, 1.9725]], ... 
[[1.0376, 1.4479, 1.3919]], [[1.0709, 1.2769, 1.1581]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype0-copy_lhs-v-e-shp2-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[1.9837, 1.3453, 1.1451], [1.0393, 1.7102, 1.1231], [1.9667, 1.3427, 1.9188]]], ...3], [1.9856, 1.6677, 1.3436], [1.7302, 1.8782, 1.7244]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.1536, 1.1899, 1.0132]], [[1.5045, 1.0409, 1.8340]], [[1.3792, 1.3472, 1.6824]], ... 
[[1.9552, 1.6238, 1.0284]], [[1.1406, 1.4779, 1.4915]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype0-copy_lhs-v-e-shp3-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'copy_lhs' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.7252, 1.0840, 1.9640], [1.1687, 1.9203, 1.1945], [1.6023, 1.0339, 1.6254], [1.4512,... 1.1999], [1.6758, 1.4689, 1.2436], [1.8462, 1.3855, 1.1263]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9029, 1.9144, 1.8875], [1.6326, 1.5043, 1.6219], [1.4152, 1.8648, 1.1577], [1.3812,... 
1.8584], [1.4997, 1.2877, 1.1594], [1.1848, 1.9083, 1.3447]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype0-copy_lhs-v-e-shp3-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'copy_lhs' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.8749, 1.8069, 1.6576], [1.6872, 1.8014, 1.5111], [1.5775, 1.5092, 1.2959], [1.3529,... 1.8311], [1.9945, 1.5891, 1.8284], [1.3741, 1.1008, 1.3976]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3056, 1.0249, 1.2464], [1.4173, 1.6492, 1.4299], [1.0583, 1.1717, 1.8360], [1.1449,... 
1.9553], [1.1812, 1.2312, 1.5149], [1.7617, 1.6760, 1.5054]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype0-copy_lhs-v-e-shp4-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'copy_lhs' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.0691], [1.2853], [1.8684], [1.8701], [1.4684], [1.5600], [1...370], [1.5768], [1.0738], [1.9004], [1.9864]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9871], [1.9690], [1.5126], [1.4504], [1.5510], [1.8117], [1...491], [1.4943], [1.1473], [1.1924], [1.0624]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, 
lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype0-copy_lhs-v-e-shp4-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'copy_lhs' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.0670], [1.8762], [1.2532], [1.4050], [1.8574], [1.3349], [1...663], [1.2824], [1.3146], [1.0117], [1.8680]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.0001], [1.1081], [1.3189], [1.9353], [1.0028], [1.4692], [1...740], [1.1331], [1.7331], [1.9915], [1.9095]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype0-copy_lhs-e-u-shp0-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[[1.0624], [1.7483], [1.9800]]], [[[1.2382], [1.5803], ...3]]], [[[1.4555], [1.2134], [1.6336]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.8494]], [[1.2623]], [[1.4378]]]], [[[[1.1479]], [[1.565... [[[[1.7823]], [[1.6677]], [[1.5693]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
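Before the failing autocast call, the gsddmm frame shown in each traceback rewrites its operator so the backend only needs the commutative kernels: 'sub' becomes 'add' on a negated right operand, and 'div' becomes 'mul' on its reciprocal. A standalone sketch of that rewriting follows; the helper name `canonicalize_op` is hypothetical, but the two identities are read straight off the source listing above.

def canonicalize_op(op, rhs_data):
    # a - b == a + (-b): fold subtraction into addition
    if op == 'sub':
        op, rhs_data = 'add', -rhs_data
    # a / b == a * (1/b): fold division into multiplication
    if op == 'div':
        op, rhs_data = 'mul', 1. / rhs_data
    return op, rhs_data

# Example: canonicalize_op('div', 4.0) returns ('mul', 0.25).
# 'copy_lhs', the op in every failure here, passes through unchanged, so the
# rewriting is not what fails; the autocast call after it is.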
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype0-copy_lhs-e-u-shp0-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[[1.1961], [1.3003], [1.4553]]], [[[1.4166], [1.5199], ...8]]], [[[1.0564], [1.2833], [1.8066]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.7015]], [[1.0030]], [[1.7208]]]], [[[[1.0459]], [[1.087... [[[[1.2934]], [[1.6561]], [[1.9137]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype0-copy_lhs-e-u-shp1-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[1.1791, 1.2826, 1.6741, ..., 1.1083, 1.8207, 1.6090]], [[1.0745, 1.4603, 1.0722, ..., 1.4101,... [[1.3109, 1.5710, 1.0713, ..., 1.9303, 1.9478, 1.1405]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.2938, 1.3529, 1.6591, ..., 1.0975, 1.9537, 1.4835], [1.0146, 1.4225, 1.2202, ..., 1.6568, 1... [1.7539, 1.9383, 1.1131, ..., 1.4198, 1.6792, 1.6433]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype0-copy_lhs-e-u-shp1-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[1.9904, 1.2548, 1.1574, ..., 1.8041, 1.5464, 1.4524]], [[1.5448, 1.1787, 1.4344, ..., 1.6082,... [[1.8325, 1.0489, 1.8912, ..., 1.7816, 1.8473, 1.8800]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.4894, 1.3893, 1.1910, ..., 1.8879, 1.0056, 1.6098], [1.7727, 1.6522, 1.0417, ..., 1.3371, 1... 
[1.3661, 1.9908, 1.0858, ..., 1.5143, 1.5450, 1.7196]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype0-copy_lhs-e-u-shp2-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[1.5324, 1.5485, 1.2139], [1.9242, 1.7839, 1.3781], [1.2258, 1.1341, 1.9913]]], ...6], [1.0235, 1.8113, 1.9620], [1.9312, 1.8824, 1.7906]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.3669, 1.6057, 1.8378]], [[1.6629, 1.8381, 1.9616]], [[1.2158, 1.1751, 1.2575]], ... 
[[1.2645, 1.9198, 1.9612]], [[1.6951, 1.0434, 1.7765]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype0-copy_lhs-e-u-shp2-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[1.2801, 1.2769, 1.9722], [1.4143, 1.2311, 1.8854], [1.7968, 1.8388, 1.6983]]], ...7], [1.6830, 1.3585, 1.7203], [1.6298, 1.2057, 1.2798]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.9371, 1.4806, 1.9215]], [[1.0175, 1.1972, 1.6357]], [[1.8293, 1.3152, 1.5706]], ... 
___________________ test_sddmm[idtype0-copy_lhs-e-u-shp3-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={...}, edata_schemes={...})
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_lhs'
idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3])
SDDMM(message func: u_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-u-shp3-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_lhs'
idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([300, 3])
SDDMM(message func: u_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-u-shp4-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={...}, edata_schemes={...})
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_lhs'
idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1])
SDDMM(message func: u_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-u-shp4-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_lhs'
idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([300, 1])
SDDMM(message func: u_copy_lhs_e)
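Aside from the crash itself, note the rewrite at the top of the gsddmm wrapper that appears in every traceback above: 'sub' is dispatched as 'add' on a negated rhs, and 'div' as 'mul' on a reciprocal rhs, so the backend only needs add/mul (plus dot and copy) kernels. A quick standalone check of the two identities, for illustration only:

    import torch

    a = torch.tensor([4.0, 9.0])
    b = torch.tensor([2.0, 3.0])
    assert torch.equal(a - b, a + (-b))          # 'sub' -> 'add' with negated rhs (bit-exact in IEEE arithmetic)
    assert torch.allclose(a / b, a * (1.0 / b))  # 'div' -> 'mul' with reciprocal rhs (equal up to rounding)

Because the reciprocal introduces one extra rounding step, the 'div' path is equal only up to floating-point tolerance, which is why the second check uses allclose rather than equal.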
___________________ test_sddmm[idtype0-copy_lhs-e-v-shp0-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={...}, edata_schemes={...})
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e'
msg = 'copy_lhs', idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1])
SDDMM(message func: v_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-v-shp0-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e'
msg = 'copy_lhs', idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1])
SDDMM(message func: v_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-v-shp1-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={...}, edata_schemes={...})
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e'
msg = 'copy_lhs', idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7])
SDDMM(message func: v_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-v-shp1-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e'
msg = 'copy_lhs', idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7])
SDDMM(message func: v_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-v-shp2-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={...}, edata_schemes={...})
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e'
msg = 'copy_lhs', idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3])
SDDMM(message func: v_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-v-shp2-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e'
msg = 'copy_lhs', idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3])
SDDMM(message func: v_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-v-shp3-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={...}, edata_schemes={...})
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs'
idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3])
SDDMM(message func: v_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-v-shp3-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs'
idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([300, 3])
SDDMM(message func: v_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-v-shp4-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={...}, edata_schemes={...})
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs'
idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1])
SDDMM(message func: v_copy_lhs_e)
___________________ test_sddmm[idtype0-copy_lhs-e-v-shp4-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs'
idtype = torch.int32

(test body, traceback, and locals identical in form to the first full failure above)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
torch.int32
lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([300, 1])
SDDMM(message func: v_copy_lhs_e)
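The leading dimensions printed in each captured stdout follow the select() helper used by the test: target 'u' takes the source-node count, 'e' the edge count, and 'v' the destination-node count, which is why lhs rows are 40 for lhs_target='v' on the bipartite graph and rhs rows are 300 for rhs_target='e'. A hypothetical equivalent of that helper, for illustration only (the real one lives in the DGL test utilities and may differ):

    def select(target, u_value, e_value, v_value):
        # 'u' -> source nodes, 'e' -> edges, 'v' -> destination nodes
        return {'u': u_value, 'e': e_value, 'v': v_value}[target]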
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([300, 1]) SDDMM(message func: v_copy_lhs_e) ___________________ test_sddmm[idtype0-copy_rhs-u-v-shp0-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[[1.7310], [1.0657], [1.2240]]], [[[1.3965], [1.0811], ...2]]], [[[1.0436], [1.3816], [1.2828]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2331]], [[1.6895]], [[1.1257]]]], [[[[1.2496]], [[1.325... [[[[1.3718]], [[1.0752]], [[1.5236]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: v_copy_rhs_u) ___________________ test_sddmm[idtype0-copy_rhs-u-v-shp0-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[[1.7265], [1.9321], [1.3837]]], [[[1.7754], [1.4933], ...6]]], [[[1.4535], [1.1395], [1.5508]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.9610]], [[1.3691]], [[1.9871]]]], [[[[1.9613]], [[1.140... [[[[1.2451]], [[1.3396]], [[1.7465]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: v_copy_rhs_u) ___________________ test_sddmm[idtype0-copy_rhs-u-v-shp1-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[1.4896, 1.2096, 1.4753, ..., 1.4225, 1.6737, 1.8242]], [[1.0695, 1.6274, 1.2593, ..., 1.5674,... [[1.1998, 1.0530, 1.7705, ..., 1.3930, 1.9870, 1.2553]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.0018, 1.7173, 1.4749, ..., 1.2842, 1.3746, 1.2321], [1.6216, 1.4613, 1.7651, ..., 1.8785, 1... [1.7726, 1.6455, 1.4912, ..., 1.2032, 1.8312, 1.3027]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: v_copy_rhs_u) ___________________ test_sddmm[idtype0-copy_rhs-u-v-shp1-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[1.7576, 1.6508, 1.1042, ..., 1.6452, 1.8217, 1.6348]], [[1.7744, 1.9975, 1.8305, ..., 1.4077,... [[1.3021, 1.7786, 1.4320, ..., 1.5246, 1.4244, 1.3116]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.9162, 1.9451, 1.6158, ..., 1.3440, 1.0455, 1.4317], [1.5410, 1.7673, 1.2578, ..., 1.6912, 1... 
The remaining copy_rhs parametrizations with lhs_target='v', rhs_target='u' (message func v_copy_rhs_u) fail at the same call site, python/dgl/backend/pytorch/sparse.py:731, with the identical TypeError; only the parametrized graph and feature shapes differ:

FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp1-g1]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp2-g0]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp2-g1]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp3-g0]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp3-g1]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g0]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g1]
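A side note on the gsddmm wrapper reprinted in each traceback: before it ever reaches the autocast call, it canonicalizes 'sub' into 'add' with a negated right operand and 'div' into 'mul' with a reciprocal right operand, so the backend only needs kernels for the canonical ops. The identities it relies on can be checked directly with plain torch, independent of DGL:

    import torch

    lhs = torch.tensor([4.0, 9.0])
    rhs = torch.tensor([2.0, 3.0])

    # 'sub' -> 'add' with rhs_data = -rhs_data
    assert torch.equal(lhs - rhs, lhs + (-rhs))

    # 'div' -> 'mul' with rhs_data = 1. / rhs_data
    assert torch.allclose(lhs / rhs, lhs * (1.0 / rhs))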
The lhs_target='e', rhs_target='u' group (message func e_copy_rhs_u) fails the same way across every shape and both test graphs:

FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g0]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g1]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g0]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g1]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g0]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g1]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g0]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g1]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g0]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g1]
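The leading dimension of each printed feature shape follows from the test's select helper: the row count is the source-node count for target 'u', the edge count for 'e', and the destination-node count for 'v'. The suite's own definition is not shown in this log, but a mapping consistent with every captured shape above would be:

    # Hypothetical reconstruction of the test helper; the argument order
    # matches the call sites select(target, num_src, num_edges, num_dst).
    def select(target, src_val, edge_val, dst_val):
        return {'u': src_val, 'e': edge_val, 'v': dst_val}[target]

    # For the heterograph (30 '_U' nodes, 300 edges, 40 '_V' nodes) in the
    # e_copy_rhs_u cases: lhs has 300 rows (edges), rhs has 30 rows (sources).
    assert select('e', 30, 300, 40) == 300
    assert select('u', 30, 300, 40) == 30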
So does the lhs_target='u', rhs_target='v' group (message func u_copy_rhs_v):

FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g0]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g1]
___________________ test_sddmm[idtype0-copy_rhs-v-u-shp1-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v'
msg = 'copy_rhs', idtype = torch.int32

    [test body identical to the representative failure above]

        with F.record_grad():
>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_rhs'
lhs_data = tensor([[[[[1.4008, 1.1111, 1.6505, ..., 1.7863, 1.7594, 1.0311]],
                      [[1.7125, 1.8667, 1.1753, ..., 1.2737,...
                      [[1.9052, 1.6000, 1.8308, ..., 1.5747, 1.1232, 1.5374]]]]],
           dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[1.7962, 1.9209, 1.8046, ..., 1.9273, 1.5640, 1.6341],
                      [1.9088, 1.7584, 1.1204, ..., 1.0156, 1...
                      [1.7442, 1.6289, 1.2363, ..., 1.8700, 1.9038, 1.6044]]]]],
           dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: u_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-u-shp1-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[1.7605, 1.1117, 1.7899, ..., 1.6046, 1.0771, 1.3212]], [[1.3860, 1.4661, 1.4485, ..., 1.0035,... [[1.4297, 1.1885, 1.6412, ..., 1.4832, 1.2505, 1.1683]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.9720, 1.6377, 1.7898, ..., 1.5301, 1.8621, 1.0592], [1.1479, 1.1074, 1.8492, ..., 1.0102, 1... 
[1.2968, 1.7510, 1.1970, ..., 1.3612, 1.1404, 1.4187]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]) SDDMM(message func: u_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-u-shp2-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.7930, 1.9441, 1.0280], [1.0159, 1.4705, 1.5682], [1.3870, 1.7904, 1.6208]]], ...1], [1.1109, 1.9330, 1.1112], [1.3300, 1.3678, 1.0521]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.6198, 1.5653, 1.9837]], [[1.4937, 1.9903, 1.6151]], [[1.9125, 1.7720, 1.5975]], ... 
[[1.0913, 1.0988, 1.8851]], [[1.4452, 1.3879, 1.0482]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: u_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-u-shp2-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.2999, 1.8719, 1.9759], [1.3454, 1.2843, 1.2404], [1.2455, 1.5270, 1.5517]]], ...4], [1.9227, 1.5401, 1.9482], [1.6184, 1.9407, 1.7119]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.1608, 1.3534, 1.4531]], [[1.1858, 1.3279, 1.5701]], [[1.9603, 1.0991, 1.3843]], ... 
[[1.0894, 1.8011, 1.2018]], [[1.1663, 1.3692, 1.2265]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]) SDDMM(message func: u_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-u-shp3-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'copy_rhs' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.9639, 1.0239, 1.2571], [1.7790, 1.3941, 1.0523], [1.6039, 1.4590, 1.0021], [1.0027,... 1.4497], [1.7665, 1.0622, 1.0741], [1.3066, 1.6977, 1.1001]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5712, 1.9363, 1.3104], [1.1894, 1.8547, 1.5371], [1.9450, 1.9212, 1.5301], [1.1920,... 
1.4457], [1.8432, 1.9448, 1.4763], [1.5597, 1.4313, 1.3372]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: u_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-u-shp3-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'copy_rhs' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.6690, 1.2531, 1.1182], [1.8775, 1.2700, 1.7103], [1.9945, 1.8193, 1.3662], [1.5818,... 1.2935], [1.4385, 1.3757, 1.6666], [1.1990, 1.7968, 1.7922]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.0893, 1.3089, 1.9598], [1.9804, 1.5762, 1.6910], [1.2411, 1.5835, 1.2766], [1.9662,... 
1.3477], [1.3588, 1.7665, 1.3898], [1.2864, 1.0537, 1.5962]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3]) SDDMM(message func: u_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-u-shp4-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'copy_rhs' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.1380], [1.8415], [1.9156], [1.0732], [1.8895], [1.7877], [1...362], [1.7241], [1.4113], [1.5579], [1.0428]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1863], [1.7497], [1.4809], [1.6636], [1.7999], [1.0074], [1...259], [1.4150], [1.8555], [1.6792], [1.7574]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, 
lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: u_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-u-shp4-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'copy_rhs' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.9175], [1.4520], [1.3419], [1.8285], [1.0947], [1.0753], [1...549], [1.9096], [1.0978], [1.8677], [1.7710]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1751], [1.2674], [1.9035], [1.7487], [1.0413], [1.1356], [1...761], [1.3874], [1.1438], [1.9298], [1.5716]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: u_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-e-shp0-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[[1.7053], [1.6369], [1.0544]]], [[[1.6400], [1.8655], ...7]]], [[[1.0662], [1.1424], [1.0509]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.0262]], [[1.9492]], [[1.0875]]]], [[[[1.9354]], [[1.814... [[[[1.6761]], [[1.0841]], [[1.9212]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
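Every failure above reduces to the same call pattern: python/dgl/backend/pytorch/sparse.py:731 invokes autocast(enabled=False), but on this build autocast apparently resolves to a zero-argument placeholder named empty_context (the name is visible in the error text). A minimal standalone sketch of that mismatch; the placeholder body below is an assumption for illustration, not DGL's actual code:

    from contextlib import contextmanager

    @contextmanager
    def empty_context():          # hypothetical stand-in with no parameters,
        yield                     # mirroring the name seen in the error text

    autocast = empty_context      # alias used when a real AMP context is absent

    # The failing call pattern from sparse.py:731:
    try:
        with autocast(enabled=False):
            pass
    except TypeError as err:
        # prints: empty_context() got an unexpected keyword argument 'enabled'
        print(err)

Any keyword argument passed to a parameterless Python callable raises exactly this TypeError, which is why every parametrization fails identically regardless of graph or shape.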
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-e-shp0-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[[1.3431], [1.0254], [1.0715]]], [[[1.0848], [1.6962], ...9]]], [[[1.4190], [1.9976], [1.6751]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2238]], [[1.9705]], [[1.4545]]]], [[[[1.1495]], [[1.170... [[[[1.7975]], [[1.2041]], [[1.8494]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]) SDDMM(message func: e_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-e-shp1-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[1.8245, 1.6041, 1.6158, ..., 1.0767, 1.6997, 1.5001]], [[1.8021, 1.0192, 1.7696, ..., 1.3858,... [[1.9196, 1.0044, 1.1139, ..., 1.4880, 1.5116, 1.3238]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.2399, 1.4345, 1.6368, ..., 1.2760, 1.9195, 1.2968], [1.3433, 1.6024, 1.0472, ..., 1.4862, 1... [1.3562, 1.8342, 1.4409, ..., 1.5425, 1.0097, 1.0578]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-e-shp1-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[1.6144, 1.8796, 1.7662, ..., 1.8507, 1.6427, 1.2019]], [[1.1855, 1.4936, 1.0348, ..., 1.3364,... [[1.1395, 1.2236, 1.4971, ..., 1.0583, 1.1720, 1.1739]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.6714, 1.0168, 1.5697, ..., 1.0543, 1.1245, 1.3481], [1.9941, 1.0869, 1.5788, ..., 1.6151, 1... 
[1.2195, 1.8003, 1.2144, ..., 1.8822, 1.0519, 1.1884]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]) SDDMM(message func: e_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-e-shp2-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.2909, 1.9960, 1.6195], [1.1515, 1.6244, 1.6965], [1.4397, 1.8664, 1.5906]]], ...0], [1.2854, 1.2643, 1.3980], [1.4757, 1.6669, 1.6785]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.7555, 1.1718, 1.2527]], [[1.0958, 1.5202, 1.2048]], [[1.5449, 1.1647, 1.1193]], ... 
[[1.5235, 1.2580, 1.4749]], [[1.5464, 1.6145, 1.7354]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-e-shp2-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.8593, 1.7708, 1.9548], [1.6215, 1.3200, 1.0841], [1.8254, 1.8314, 1.1558]]], ...0], [1.6998, 1.0275, 1.1913], [1.3190, 1.0625, 1.7348]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.9105, 1.4618, 1.4655]], [[1.8862, 1.0712, 1.0487]], [[1.5455, 1.5366, 1.5574]], ... 
[[1.6548, 1.3397, 1.3083]], [[1.5714, 1.8381, 1.9192]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]) SDDMM(message func: e_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-e-shp3-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'copy_rhs' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.9426, 1.8016, 1.2469], [1.9191, 1.4651, 1.0822], [1.9949, 1.2217, 1.5662], [1.8663,... 1.3076], [1.7460, 1.1072, 1.4484], [1.5081, 1.0971, 1.1520]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6770, 1.1039, 1.3808], [1.3925, 1.0672, 1.7036], [1.7111, 1.7083, 1.8968], [1.1813,... 
1.1263], [1.7653, 1.1075, 1.8210], [1.1354, 1.8905, 1.8205]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_copy_rhs_v) ___________________ test_sddmm[idtype0-copy_rhs-v-e-shp3-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'copy_rhs' idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.1290, 1.7602, 1.2775], [1.4426, 1.7664, 1.2397], [1.2406, 1.1369, 1.4241], [1.3834,... 1.1420], [1.3346, 1.0864, 1.1065], [1.7656, 1.0885, 1.5233]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1855, 1.9841, 1.4062], [1.2306, 1.9463, 1.5475], [1.9273, 1.3656, 1.4623], [1.7881,... 
The next thirteen parametrizations fail with the identical traceback and error
(python/dgl/backend/pytorch/sparse.py:731, TypeError: empty_context() got an
unexpected keyword argument 'enabled'); only the test id, the captured operand
shapes, the graph (g0: 30 nodes / 100 edges; g1: bipartite '_U'/'_V', 30/40
nodes, 300 edges), and the random tensor values differ:

test_sddmm[idtype0-copy_rhs-v-e-shp3-g1]  lhs [300, 3]             rhs [40, 3]               e_copy_rhs_v
test_sddmm[idtype0-copy_rhs-v-e-shp4-g0]  lhs [100, 1]             rhs [30, 1]               e_copy_rhs_v
test_sddmm[idtype0-copy_rhs-v-e-shp4-g1]  lhs [300, 1]             rhs [40, 1]               e_copy_rhs_v
test_sddmm[idtype0-copy_rhs-e-u-shp0-g0]  lhs [30, 1, 2, 1, 3, 1]  rhs [100, 4, 1, 3, 1, 1]  u_copy_rhs_e
test_sddmm[idtype0-copy_rhs-e-u-shp0-g1]  lhs [30, 1, 2, 1, 3, 1]  rhs [300, 4, 1, 3, 1, 1]  u_copy_rhs_e
test_sddmm[idtype0-copy_rhs-e-u-shp1-g0]  lhs [30, 5, 3, 1, 7]     rhs [100, 1, 3, 7, 7]     u_copy_rhs_e
test_sddmm[idtype0-copy_rhs-e-u-shp1-g1]  lhs [30, 5, 3, 1, 7]     rhs [300, 1, 3, 7, 7]     u_copy_rhs_e
test_sddmm[idtype0-copy_rhs-e-u-shp2-g0]  lhs [30, 1, 3, 3]        rhs [100, 4, 1, 3]        u_copy_rhs_e
test_sddmm[idtype0-copy_rhs-e-u-shp2-g1]  lhs [30, 1, 3, 3]        rhs [300, 4, 1, 3]        u_copy_rhs_e
test_sddmm[idtype0-copy_rhs-e-u-shp3-g0]  lhs [30, 3]              rhs [100, 3]              u_copy_rhs_e
test_sddmm[idtype0-copy_rhs-e-u-shp3-g1]  lhs [30, 3]              rhs [300, 3]              u_copy_rhs_e
test_sddmm[idtype0-copy_rhs-e-u-shp4-g0]  lhs [30, 1]              rhs [100, 1]              u_copy_rhs_e
test_sddmm[idtype0-copy_rhs-e-u-shp4-g1]  lhs [30, 1]              rhs [300, 1]              u_copy_rhs_e
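For reading the shape columns above: the test derives each operand's leading
dimension with a three-way select helper that dispatches on whether the feature
lives on source nodes ('u'), edges ('e'), or destination nodes ('v'). A sketch
under the obvious assumption about select, whose definition does not appear in
this log:

    def select(target, src_val, edge_val, dst_val):
        # Assumed dispatch: 'u' -> source-node value, 'e' -> edge value,
        # 'v' -> destination-node value.
        return {'u': src_val, 'e': edge_val, 'v': dst_val}[target]

    # Reproducing the bipartite case printed earlier (30 source nodes,
    # 300 edges, 40 destination nodes) with shp = ((3,), (3,)),
    # lhs_target='e', rhs_target='v':
    num_src, num_edges, num_dst = 30, 300, 40
    shp = ((3,), (3,))
    lhs_shp = (select('e', num_src, num_edges, num_dst),) + shp[0]
    rhs_shp = (select('v', num_src, num_edges, num_dst),) + shp[1]
    print(lhs_shp, rhs_shp)  # (300, 3) (40, 3)

This is why the bipartite g1 pairs a 300-row edge tensor with a 40-row
destination tensor, while g0 (30 nodes, 100 edges) pairs 100 rows with 30.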
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([300, 1]) SDDMM(message func: u_copy_rhs_e) ___________________ test_sddmm[idtype0-copy_rhs-e-v-shp0-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[[1.0257], [1.5564], [1.6735]]], [[[1.7574], [1.1896], ...6]]], [[[1.1870], [1.6611], [1.5146]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.9962]], [[1.6530]], [[1.2550]]]], [[[[1.6330]], [[1.668... [[[[1.6257]], [[1.0043]], [[1.6858]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype0-copy_rhs-e-v-shp0-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[[1.2068], [1.7089], [1.6667]]], [[[1.9400], [1.5986], ...6]]], [[[1.6033], [1.6855], [1.1678]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.1027]], [[1.8473]], [[1.1386]]]], [[[[1.5080]], [[1.411... [[[[1.2864]], [[1.7288]], [[1.9489]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype0-copy_rhs-e-v-shp1-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[1.6091, 1.6454, 1.4716, ..., 1.8616, 1.6843, 1.9600]], [[1.5139, 1.3314, 1.0543, ..., 1.2685,... [[1.2620, 1.6248, 1.2271, ..., 1.6489, 1.3613, 1.5916]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.8039, 1.7798, 1.4873, ..., 1.7127, 1.1045, 1.6116], [1.9957, 1.0704, 1.0743, ..., 1.1543, 1... [1.3298, 1.7809, 1.7526, ..., 1.5528, 1.0602, 1.7391]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int32 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype0-copy_rhs-e-v-shp1-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[1.4636, 1.8160, 1.7627, ..., 1.4711, 1.8838, 1.0761]], [[1.3038, 1.3331, 1.5151, ..., 1.9039,... [[1.0296, 1.4721, 1.8574, ..., 1.2210, 1.4098, 1.0417]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.9172, 1.9872, 1.5898, ..., 1.8814, 1.4536, 1.0514], [1.0999, 1.0333, 1.8741, ..., 1.4705, 1... 
[1.4220, 1.5655, 1.3559, ..., 1.9869, 1.7374, 1.9699]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype0-copy_rhs-e-v-shp2-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int32 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.2458, 1.3479, 1.8402], [1.3437, 1.6396, 1.3057], [1.5878, 1.5778, 1.4103]]], ...0], [1.6906, 1.3375, 1.2168], [1.2763, 1.5508, 1.4283]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.6273, 1.7621, 1.2358]], [[1.5897, 1.5186, 1.0311]], [[1.0716, 1.3854, 1.5894]], ... 
The remaining test_sddmm parametrizations fail with the identical traceback and
TypeError at python/dgl/backend/pytorch/sparse.py:731; the repeated test body and
tensor dumps are elided below, keeping only the parameters and captured shapes.
Here g0 = Graph(num_nodes=30, num_edges=100) and
g1 = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}).

idtype0 (torch.int32), msg = 'copy_rhs', lhs_target = 'v', rhs_target = 'e',
SDDMM(message func: v_copy_rhs_e):
test_sddmm[idtype0-copy_rhs-e-v-shp2-g0]  shp = ((1, 3, 3), (4, 1, 3))  lhs torch.Size([30, 1, 3, 3])  rhs torch.Size([100, 4, 1, 3])
test_sddmm[idtype0-copy_rhs-e-v-shp2-g1]  shp = ((1, 3, 3), (4, 1, 3))  lhs torch.Size([40, 1, 3, 3])  rhs torch.Size([300, 4, 1, 3])
test_sddmm[idtype0-copy_rhs-e-v-shp3-g0]  shp = ((3,), (3,))            lhs torch.Size([30, 3])        rhs torch.Size([100, 3])
test_sddmm[idtype0-copy_rhs-e-v-shp3-g1]  shp = ((3,), (3,))            lhs torch.Size([40, 3])        rhs torch.Size([300, 3])
test_sddmm[idtype0-copy_rhs-e-v-shp4-g0]  shp = ((1,), (1,))            lhs torch.Size([30, 1])        rhs torch.Size([100, 1])
test_sddmm[idtype0-copy_rhs-e-v-shp4-g1]  shp = ((1,), (1,))            lhs torch.Size([40, 1])        rhs torch.Size([300, 1])
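As an aside, the gsddmm source repeated in the tracebacks above also shows the
operator lowering DGL applies before dispatch: `sub` is executed as `add` with a
negated rhs_data, and `div` as `mul` with a reciprocal rhs_data, so the backend
only needs kernels for the additive and multiplicative forms. A self-contained
check of that identity in plain PyTorch (illustration only; `lhs` and `rhs` are
made-up tensors, not the test fixtures):

    import torch

    lhs = torch.rand(4, 3) + 1  # keep entries away from zero, as the test does
    rhs = torch.rand(4, 3) + 1

    # The two rewrites used by gsddmm are exact algebraic identities:
    assert torch.allclose(lhs - rhs, lhs + (-rhs))
    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))

The condensed listing continues below with the idtype1 groups.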
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int32 lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([300, 1]) SDDMM(message func: v_copy_rhs_e) _____________________ test_sddmm[idtype1-add-u-v-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u' msg = 'add', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.0538], [1.0238], [1.8920]]], [[[1.5129], [1.6785], ...4]]], [[[1.7665], [1.6139], [1.8177]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2399]], [[1.7928]], [[1.9129]]]], [[[[1.5293]], [[1.198... [[[[1.0765]], [[1.2754]], [[1.9940]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype1-add-u-v-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u' msg = 'add', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.3823], [1.7771], [1.3588]]], [[[1.0440], [1.9118], ...9]]], [[[1.0188], [1.0956], [1.3980]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2014]], [[1.5210]], [[1.2375]]]], [[[[1.4609]], [[1.822... [[[[1.5467]], [[1.1544]], [[1.6318]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype1-add-u-v-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u' msg = 'add', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.6195, 1.4338, 1.8764, ..., 1.5938, 1.6521, 1.3412]], [[1.1294, 1.1809, 1.4592, ..., 1.0061,... [[1.1553, 1.3660, 1.1181, ..., 1.1456, 1.3594, 1.2059]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.0602, 1.7022, 1.4194, ..., 1.3384, 1.2940, 1.6516], [1.7840, 1.7304, 1.4310, ..., 1.7326, 1... [1.1816, 1.7416, 1.0626, ..., 1.7666, 1.6093, 1.6890]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype1-add-u-v-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u' msg = 'add', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.1600, 1.3202, 1.9316, ..., 1.5596, 1.7538, 1.3835]], [[1.7017, 1.7116, 1.0206, ..., 1.0603,... [[1.5541, 1.4394, 1.2179, ..., 1.5918, 1.1283, 1.7483]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.1217, 1.5164, 1.1071, ..., 1.1877, 1.2064, 1.0435], [1.8529, 1.0346, 1.2597, ..., 1.4338, 1... 
[1.8697, 1.9658, 1.0314, ..., 1.6742, 1.9173, 1.7853]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype1-add-u-v-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u', msg = 'add' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.7166, 1.1163, 1.0126], [1.7361, 1.6406, 1.4116], [1.3560, 1.6306, 1.2666]]], ...0], [1.2746, 1.5040, 1.0010], [1.9613, 1.5678, 1.9850]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.1499, 1.1163, 1.8378]], [[1.6988, 1.1679, 1.3395]], [[1.6171, 1.5862, 1.1393]], ... 
[[1.2030, 1.5457, 1.2894]], [[1.0679, 1.1847, 1.3448]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype1-add-u-v-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u', msg = 'add' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.5322, 1.6398, 1.2337], [1.4101, 1.3125, 1.8689], [1.2051, 1.7237, 1.0797]]], ...8], [1.9485, 1.2556, 1.4809], [1.7045, 1.9069, 1.8979]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.8330, 1.8448, 1.8374]], [[1.3421, 1.9555, 1.2751]], [[1.2130, 1.4765, 1.0832]], ... 
[[1.8049, 1.0599, 1.7277]], [[1.9871, 1.0453, 1.2419]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype1-add-u-v-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'add' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.4041, 1.2498, 1.9160], [1.7599, 1.7851, 1.8746], [1.1167, 1.7918, 1.3392], [1.0029,... 1.7866], [1.6009, 1.9302, 1.0046], [1.4175, 1.7351, 1.0155]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7312, 1.0880, 1.6387], [1.5006, 1.7762, 1.7168], [1.6555, 1.3096, 1.8716], [1.3026,... 
1.0506], [1.6509, 1.7399, 1.1844], [1.7028, 1.7442, 1.2610]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype1-add-u-v-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'add' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.8832, 1.6151, 1.3374], [1.8579, 1.2571, 1.0948], [1.8246, 1.8354, 1.3758], [1.3532,... 1.2839], [1.7480, 1.5352, 1.9231], [1.7127, 1.9099, 1.7350]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7571, 1.0550, 1.9447], [1.8379, 1.8479, 1.4755], [1.0972, 1.9695, 1.9028], [1.2951,... 
1.4620], [1.4834, 1.3287, 1.9388], [1.6115, 1.1575, 1.4562]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype1-add-u-v-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'add' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.8462], [1.4939], [1.4392], [1.3176], [1.1358], [1.0058], [1...643], [1.8248], [1.9922], [1.2181], [1.4111]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8180], [1.0905], [1.3669], [1.3680], [1.7346], [1.2131], [1...768], [1.0198], [1.1849], [1.1895], [1.7659]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', 
rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype1-add-u-v-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'add' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.6881], [1.7776], [1.4317], [1.4669], [1.5663], [1.7057], [1...930], [1.5205], [1.3667], [1.6035], [1.3891]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5490], [1.0649], [1.0893], [1.9682], [1.9800], [1.8304], [1...873], [1.1750], [1.2546], [1.4728], [1.5281]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: v_add_u) _____________________ test_sddmm[idtype1-add-u-e-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'add', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.6496], [1.8398], [1.0663]]], [[[1.8504], [1.2146], ...3]]], [[[1.8121], [1.6003], [1.2655]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.3107]], [[1.5943]], [[1.7119]]]], [[[[1.0288]], [[1.702... [[[[1.2305]], [[1.6976]], [[1.6779]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The remaining add-u-e and add-v-u parametrizations of test_sddmm fail identically: each reaches gsddmm from tests/compute/test_sparse.py:207, and each stops at python/dgl/backend/pytorch/sparse.py:731 on the line "with autocast(enabled=False):" with

E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

Only the parametrized graph and feature shapes differ:

test_sddmm[idtype1-add-u-e-shp0-g1]  e_add_u  lhs torch.Size([300, 1, 2, 1, 3, 1]), rhs torch.Size([30, 4, 1, 3, 1, 1])
test_sddmm[idtype1-add-u-e-shp1-g0]  e_add_u  lhs torch.Size([100, 5, 3, 1, 7]), rhs torch.Size([30, 1, 3, 7, 7])
test_sddmm[idtype1-add-u-e-shp1-g1]  e_add_u  lhs torch.Size([300, 5, 3, 1, 7]), rhs torch.Size([30, 1, 3, 7, 7])
test_sddmm[idtype1-add-u-e-shp2-g0]  e_add_u  lhs torch.Size([100, 1, 3, 3]), rhs torch.Size([30, 4, 1, 3])
test_sddmm[idtype1-add-u-e-shp2-g1]  e_add_u  lhs torch.Size([300, 1, 3, 3]), rhs torch.Size([30, 4, 1, 3])
test_sddmm[idtype1-add-u-e-shp3-g0]  e_add_u  lhs torch.Size([100, 3]), rhs torch.Size([30, 3])
test_sddmm[idtype1-add-u-e-shp3-g1]  e_add_u  lhs torch.Size([300, 3]), rhs torch.Size([30, 3])
test_sddmm[idtype1-add-u-e-shp4-g0]  e_add_u  lhs torch.Size([100, 1]), rhs torch.Size([30, 1])
test_sddmm[idtype1-add-u-e-shp4-g1]  e_add_u  lhs torch.Size([300, 1]), rhs torch.Size([30, 1])
test_sddmm[idtype1-add-v-u-shp0-g0]  u_add_v  lhs torch.Size([30, 1, 2, 1, 3, 1]), rhs torch.Size([30, 4, 1, 3, 1, 1])
test_sddmm[idtype1-add-v-u-shp0-g1]  u_add_v  lhs torch.Size([30, 1, 2, 1, 3, 1]), rhs torch.Size([40, 4, 1, 3, 1, 1])
test_sddmm[idtype1-add-v-u-shp1-g0]  u_add_v  lhs torch.Size([30, 5, 3, 1, 7]), rhs torch.Size([30, 1, 3, 7, 7])
test_sddmm[idtype1-add-v-u-shp1-g1]  u_add_v  lhs torch.Size([30, 5, 3, 1, 7]), rhs torch.Size([40, 1, 3, 7, 7])
test_sddmm[idtype1-add-v-u-shp2-g0]  u_add_v  lhs torch.Size([30, 1, 3, 3]), rhs torch.Size([30, 4, 1, 3])
test_sddmm[idtype1-add-v-u-shp2-g1]  u_add_v  lhs torch.Size([30, 1, 3, 3]), rhs torch.Size([40, 4, 1, 3])
test_sddmm[idtype1-add-v-u-shp3-g0]  u_add_v  lhs torch.Size([30, 3]), rhs torch.Size([30, 3])
test_sddmm[idtype1-add-v-u-shp3-g1]  u_add_v  lhs torch.Size([30, 3]), rhs torch.Size([40, 3])
test_sddmm[idtype1-add-v-u-shp4-g0]  u_add_v  lhs torch.Size([30, 1]), rhs torch.Size([30, 1])
test_sddmm[idtype1-add-v-u-shp4-g1]  u_add_v  lhs torch.Size([30, 1]), rhs torch.Size([40, 1])
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: u_add_v) _____________________ test_sddmm[idtype1-add-v-e-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'add', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.1146], [1.7684], [1.8266]]], [[[1.3946], [1.8078], ...7]]], [[[1.5341], [1.6418], [1.1312]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.7498]], [[1.2046]], [[1.6757]]]], [[[[1.7208]], [[1.224... [[[[1.4508]], [[1.7058]], [[1.8671]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
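Every failure in this run has the same root cause: on this PyTorch build, the name autocast in python/dgl/backend/pytorch/sparse.py is bound to DGL's empty_context fallback, which does not accept the enabled keyword passed at line 731. A minimal sketch of a kwargs-tolerant fallback context manager follows; it illustrates one possible shape of a fix for this failure mode and is not the actual patch in this PR:

    # Sketch only: a no-op context manager that tolerates autocast-style
    # keyword arguments such as enabled=False, for PyTorch versions that
    # lack a usable torch.cuda.amp.autocast.
    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):
        # Accept and ignore whatever the real autocast would have received.
        yield

With a signature like this, `with autocast(enabled=False):` degrades to a no-op instead of raising TypeError when autocast is aliased to the fallback.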
_____________________ test_sddmm[idtype1-add-v-e-shp0-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v'
msg = 'add', idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[[[[[1.1146], [1.7684], [1.8266]]], [[[1.3946], [1.8078], ...7]]], [[[1.5341], [1.6418], [1.1312]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.7498]], [[1.2046]], [[1.6757]]]], [[[[1.7208]], [[1.224... [[[[1.4508]], [[1.7058]], [[1.8671]]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_add_v)
_____________________ test_sddmm[idtype1-add-v-e-shp0-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v'
msg = 'add', idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[[[[[1.9730], [1.9797], [1.2280]]], [[[1.0796], [1.7329], ...8]]], [[[1.0448], [1.9750], [1.3335]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.5435]], [[1.7884]], [[1.7035]]]], [[[[1.4579]], [[1.503... [[[[1.3682]], [[1.7304]], [[1.8775]]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1])
SDDMM(message func: e_add_v)
_____________________ test_sddmm[idtype1-add-v-e-shp1-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v'
msg = 'add', idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[[[[1.2434, 1.7485, 1.4575, ..., 1.0595, 1.9408, 1.3017]], [[1.0811, 1.7514, 1.2345, ..., 1.3628,... [[1.6862, 1.7738, 1.2545, ..., 1.7720, 1.4763, 1.8924]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[1.1037, 1.8413, 1.3547, ..., 1.0432, 1.5343, 1.8118], [1.9101, 1.7309, 1.2263, ..., 1.4962, 1... [1.7990, 1.7387, 1.1957, ..., 1.6894, 1.6975, 1.0813]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: e_add_v)
_____________________ test_sddmm[idtype1-add-v-e-shp1-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v'
msg = 'add', idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[[[[1.1781, 1.8104, 1.4167, ..., 1.7402, 1.7841, 1.2432]], [[1.2595, 1.5318, 1.4352, ..., 1.5960,... [[1.6245, 1.1906, 1.1987, ..., 1.2696, 1.0877, 1.7847]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[1.1033, 1.2277, 1.2138, ..., 1.1396, 1.9349, 1.7216], [1.7964, 1.4656, 1.7572, ..., 1.3913, 1... [1.3547, 1.3458, 1.1348, ..., 1.7423, 1.4263, 1.1541]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])
SDDMM(message func: e_add_v)
_____________________ test_sddmm[idtype1-add-v-e-shp2-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'add'
idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[[[1.0624, 1.4986, 1.4694], [1.3361, 1.7209, 1.1186], [1.8727, 1.8659, 1.2851]]], ...2], [1.8533, 1.2509, 1.8916], [1.1403, 1.3029, 1.7535]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[1.0850, 1.5078, 1.1561]], [[1.9792, 1.5955, 1.8601]], [[1.3758, 1.0178, 1.9089]], ... [[1.6209, 1.8861, 1.4962]], [[1.1680, 1.2349, 1.6488]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: e_add_v)
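The lhs and rhs shapes printed in each captured stdout differ only in their leading node or edge dimension; the trailing feature dimensions are combined by NumPy-style broadcasting, which is how shapes like (1, 2, 1, 3, 1) and (4, 1, 3, 1, 1) pair up in the shp0 cases. A small self-contained check of that rule, using those two shapes as example inputs:

    import numpy as np

    # Trailing feature dims of the two operands in the shp0 cases above.
    lhs_feat = (1, 2, 1, 3, 1)
    rhs_feat = (4, 1, 3, 1, 1)
    # np.broadcast_shapes applies the same size-1 expansion rules the
    # SDDMM kernels rely on for the per-edge result.
    print(np.broadcast_shapes(lhs_feat, rhs_feat))  # (4, 2, 3, 3, 1)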
_____________________ test_sddmm[idtype1-add-v-e-shp2-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'add'
idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[[[1.9571, 1.3955, 1.5467], [1.9316, 1.3681, 1.2087], [1.8756, 1.5289, 1.7945]]], ...0], [1.2124, 1.2538, 1.0605], [1.9247, 1.8526, 1.2114]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[1.3582, 1.1320, 1.3279]], [[1.5667, 1.7028, 1.9052]], [[1.9322, 1.8559, 1.6137]], ... [[1.4866, 1.1908, 1.5167]], [[1.0797, 1.1349, 1.9022]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3])
SDDMM(message func: e_add_v)
_____________________ test_sddmm[idtype1-add-v-e-shp3-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'add'
idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[1.5191, 1.4811, 1.2883], [1.7521, 1.4338, 1.1398], [1.8837, 1.6209, 1.2522], [1.9714,... 1.9972], [1.0341, 1.6540, 1.1054], [1.7554, 1.9855, 1.9266]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.6207, 1.5643, 1.9972], [1.7160, 1.5222, 1.0550], [1.9263, 1.0072, 1.6982], [1.0538,... 1.7073], [1.3092, 1.9873, 1.7210], [1.5783, 1.1538, 1.1296]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: e_add_v)
_____________________ test_sddmm[idtype1-add-v-e-shp3-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'add'
idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[1.1654, 1.8071, 1.8092], [1.4466, 1.1657, 1.4091], [1.2519, 1.2495, 1.5696], [1.4298,... 1.0394], [1.6665, 1.5216, 1.6141], [1.5416, 1.1146, 1.6650]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.7269, 1.4707, 1.7936], [1.2871, 1.1603, 1.2706], [1.3134, 1.5496, 1.2597], [1.2935,... 1.0325], [1.1027, 1.4124, 1.8089], [1.3275, 1.7205, 1.4660]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3])
SDDMM(message func: e_add_v)
_____________________ test_sddmm[idtype1-add-v-e-shp4-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'add'
idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[1.7608], [1.2294], [1.8637], [1.6588], [1.1241], [1.3427], [1...554], [1.5407], [1.7656], [1.9060], [1.2256]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.1851], [1.2650], [1.1349], [1.9278], [1.5218], [1.2316], [1...978], [1.9687], [1.4469], [1.1233], [1.7872]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: e_add_v)
_____________________ test_sddmm[idtype1-add-v-e-shp4-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'add'
idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[1.3876], [1.9929], [1.3771], [1.4147], [1.6702], [1.2728], [1...280], [1.8772], [1.3917], [1.5684], [1.0805]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.1477], [1.0744], [1.9818], [1.5123], [1.8177], [1.4724], [1...269], [1.1013], [1.0969], [1.8492], [1.3295]], dtype=torch.float64, requires_grad=True)
lhs_target = 'e', rhs_target = 'v'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([40, 1])
SDDMM(message func: e_add_v)
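The test body repeated above keys everything off a select helper whose definition does not appear in this log. A minimal sketch consistent with its call sites, where 'u' picks the source-node value, 'e' the edge value, and 'v' the destination-node value, is a hypothetical reconstruction only:

    # Hypothetical reconstruction inferred from the call sites in test_sddmm,
    # e.g. select(lhs_target, g.number_of_src_nodes(), g.number_of_edges(),
    # g.number_of_dst_nodes()) and select(lhs_target, g.srcdata, g.edata, g.dstdata).
    def select(target, u_value, e_value, v_value):
        return {'u': u_value, 'e': e_value, 'v': v_value}[target]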
_____________________ test_sddmm[idtype1-add-e-u-shp0-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e'
msg = 'add', idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[[[[[1.0053], [1.7804], [1.6319]]], [[[1.9662], [1.4485], ...7]]], [[[1.5787], [1.2895], [1.7345]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.5193]], [[1.4445]], [[1.0664]]]], [[[[1.2708]], [[1.306... [[[[1.3641]], [[1.4687]], [[1.6581]]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1])
SDDMM(message func: u_add_e)
_____________________ test_sddmm[idtype1-add-e-u-shp0-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e'
msg = 'add', idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[[[[[1.2054], [1.9421], [1.1976]]], [[[1.2240], [1.5792], ...4]]], [[[1.1710], [1.4424], [1.5743]]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[[1.3623]], [[1.4797]], [[1.7773]]]], [[[[1.4888]], [[1.624... [[[[1.9893]], [[1.8137]], [[1.3094]]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1])
SDDMM(message func: u_add_e)
_____________________ test_sddmm[idtype1-add-e-u-shp1-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)})
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e'
msg = 'add', idtype = torch.int64

>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)

gidx =
op = 'add'
lhs_data = tensor([[[[[1.8333, 1.8507, 1.6303, ..., 1.0348, 1.3060, 1.2684]], [[1.2158, 1.9658, 1.8782, ..., 1.9968,... [[1.5517, 1.3615, 1.5388, ..., 1.7414, 1.5114, 1.0358]]]]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[1.3245, 1.0786, 1.4657, ..., 1.6911, 1.2880, 1.3843], [1.3764, 1.4646, 1.2317, ..., 1.4859, 1... [1.1218, 1.2017, 1.1513, ..., 1.8895, 1.8786, 1.7025]]]]], dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7])
SDDMM(message func: u_add_e)
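The gsddmm wrapper listed at the top of this section rewrites 'sub' as 'add' on negated right-hand data and 'div' as 'mul' on reciprocals, so the backend only needs the two commutative kernels. A quick standalone check of those identities, illustrative only and independent of DGL:

    import torch

    x = torch.randn(4, 3, dtype=torch.float64)
    y = torch.rand(4, 3, dtype=torch.float64) + 1  # strictly positive, like the test features
    # sub(x, y) == add(x, -y) and div(x, y) == mul(x, 1/y), up to rounding.
    assert torch.allclose(x - y, x + (-y))
    assert torch.allclose(x / y, x * (1. / y))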
[1.8118, 1.2794, 1.4323, ..., 1.8533, 1.4293, 1.6536]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: u_add_e) _____________________ test_sddmm[idtype1-add-e-u-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e', msg = 'add' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.3524, 1.6432, 1.9267], [1.8275, 1.4220, 1.2313], [1.3328, 1.3519, 1.2628]]], ...4], [1.3974, 1.7028, 1.4401], [1.5274, 1.5624, 1.1759]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.1679, 1.2677, 1.9612]], [[1.7909, 1.0585, 1.6631]], [[1.8006, 1.7809, 1.8786]], ... 
[[1.1210, 1.0931, 1.2646]], [[1.8463, 1.2050, 1.7938]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: u_add_e) _____________________ test_sddmm[idtype1-add-e-u-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e', msg = 'add' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.4916, 1.7195, 1.2323], [1.5563, 1.3816, 1.7122], [1.3958, 1.2980, 1.3259]]], ...4], [1.8233, 1.3874, 1.3650], [1.9273, 1.0656, 1.6052]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.2484, 1.7657, 1.9997]], [[1.8718, 1.2602, 1.9289]], [[1.1586, 1.6986, 1.1465]], ... 
The next five test_sddmm[idtype1-add-e-u-*] parametrizations fail with the
identical traceback, TypeError: empty_context() got an unexpected keyword
argument 'enabled' at python/dgl/backend/pytorch/sparse.py:731; only the
graph (g0: Graph(num_nodes=30, num_edges=100); g1: Graph(num_nodes={'_U': 30,
'_V': 40}, num_edges={('_U', '_E', '_V'): 300})) and the operand shapes
differ:

test_sddmm[idtype1-add-e-u-shp2-g1]  lhs torch.Size([30, 1, 3, 3])  rhs torch.Size([300, 4, 1, 3])
test_sddmm[idtype1-add-e-u-shp3-g0]  lhs torch.Size([30, 3])        rhs torch.Size([100, 3])
test_sddmm[idtype1-add-e-u-shp3-g1]  lhs torch.Size([30, 3])        rhs torch.Size([300, 3])
test_sddmm[idtype1-add-e-u-shp4-g0]  lhs torch.Size([30, 1])        rhs torch.Size([100, 1])
test_sddmm[idtype1-add-e-u-shp4-g1]  lhs torch.Size([30, 1])        rhs torch.Size([300, 1])

All five report SDDMM(message func: u_add_e) in their captured stdout.
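Every one of these tracebacks stops at the same two-step pattern:
_cast_if_autocast_enabled(...) prepares the operands, then
with autocast(enabled=False): runs the custom op with mixed precision
turned off so nothing inside is re-cast. A minimal sketch of what such a
helper can look like (an assumption for illustration; DGL's actual
_cast_if_autocast_enabled may differ):

    import torch

    def _cast_if_autocast_enabled(*args):
        # If autocast is active, cast floating-point tensors to the
        # autocast dtype once, up front; pass everything else through.
        # torch.get_autocast_gpu_dtype() requires PyTorch >= 1.10.
        if not torch.is_autocast_enabled():
            return args
        dtype = torch.get_autocast_gpu_dtype()
        return tuple(
            a.to(dtype) if torch.is_tensor(a) and a.is_floating_point() else a
            for a in args)

    x = torch.randn(3)
    out = _cast_if_autocast_enabled(x, 'add')
    assert out[0] is x   # with autocast inactive, tensors pass through

The enabled=False guard exists for exactly this pattern, which is why the
guard, and not the casting helper, is what breaks once the fallback context
manager rejects the keyword.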
The pattern continues unchanged through all ten
test_sddmm[idtype1-add-e-v-*] parametrizations, each failing at
python/dgl/backend/pytorch/sparse.py:731 with the same TypeError and
reporting SDDMM(message func: v_add_e):

test_sddmm[idtype1-add-e-v-shp0-g0]  lhs torch.Size([30, 1, 2, 1, 3, 1])  rhs torch.Size([100, 4, 1, 3, 1, 1])
test_sddmm[idtype1-add-e-v-shp0-g1]  lhs torch.Size([40, 1, 2, 1, 3, 1])  rhs torch.Size([300, 4, 1, 3, 1, 1])
test_sddmm[idtype1-add-e-v-shp1-g0]  lhs torch.Size([30, 5, 3, 1, 7])     rhs torch.Size([100, 1, 3, 7, 7])
test_sddmm[idtype1-add-e-v-shp1-g1]  lhs torch.Size([40, 5, 3, 1, 7])     rhs torch.Size([300, 1, 3, 7, 7])
test_sddmm[idtype1-add-e-v-shp2-g0]  lhs torch.Size([30, 1, 3, 3])        rhs torch.Size([100, 4, 1, 3])
test_sddmm[idtype1-add-e-v-shp2-g1]  lhs torch.Size([40, 1, 3, 3])        rhs torch.Size([300, 4, 1, 3])
test_sddmm[idtype1-add-e-v-shp3-g0]  lhs torch.Size([30, 3])              rhs torch.Size([100, 3])
test_sddmm[idtype1-add-e-v-shp3-g1]  lhs torch.Size([40, 3])              rhs torch.Size([300, 3])
test_sddmm[idtype1-add-e-v-shp4-g0]  lhs torch.Size([30, 1])              rhs torch.Size([100, 1])
test_sddmm[idtype1-add-e-v-shp4-g1]  lhs torch.Size([40, 1])              rhs torch.Size([300, 1])
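The gsddmm listing repeated in every traceback also explains what the
sub-u-v failures below look like: 'sub' and 'div' never reach the kernel,
because gsddmm canonicalizes them to 'add' and 'mul' by transforming the
right operand, so those tracebacks report op = 'add' and an already-negated
rhs_data. A quick check of the two identities this rewriting relies on
(plain torch, no DGL required; adding 1 mirrors the test's
np.random.rand(*shp) + 1 and keeps the divisor away from zero):

    import torch

    lhs = torch.rand(300, 3, dtype=torch.float64) + 1
    rhs = torch.rand(300, 3, dtype=torch.float64) + 1

    assert torch.equal(lhs - rhs, lhs + (-rhs))          # 'sub' -> 'add'
    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))   # 'div' -> 'mul'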
The test_sddmm[idtype1-sub-u-v-*] parametrizations (message func v_sub_u)
fail the same way; their tracebacks show op = 'add' and negative rhs_data
values because gsddmm rewrote the 'sub' before the crash:

test_sddmm[idtype1-sub-u-v-shp0-g0]  lhs torch.Size([30, 1, 2, 1, 3, 1])  rhs torch.Size([30, 4, 1, 3, 1, 1])
test_sddmm[idtype1-sub-u-v-shp0-g1]  lhs torch.Size([40, 1, 2, 1, 3, 1])  rhs torch.Size([30, 4, 1, 3, 1, 1])
test_sddmm[idtype1-sub-u-v-shp1-g0]  lhs torch.Size([30, 5, 3, 1, 7])     rhs torch.Size([30, 1, 3, 7, 7])

_____________________ test_sddmm[idtype1-sub-u-v-shp1-g1] ______________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300},
      metagraph=[('_U', '_V', '_E')])
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u'
msg = 'sub', idtype = torch.int64

    [test body identical to the test_sddmm listing above]

tests/compute/test_sparse.py:207:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <...>
op = 'add'
lhs_data = tensor([[[[[1.8975, 1.1037, 1.7579,  ..., 1.3435, 1.8576, 1.9476]],
          [[1.5955, 1.8504, 1.5567,  ..., 1.9212,...
          [[1.6284, 1.0826, 1.4157,  ..., 1.4065, 1.8299, 1.8006]]]]],
       dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[-1.1811, -1.3984, -1.4694,  ..., -1.3201, -1.1436, -1.8778],
           [-1.2517, -1.2074, -1.7647,  ..., ...
           [-1.2746, -1.8346, -1.9630,  ..., -1.8238, -1.7100, -1.4371]]]]],
       dtype=torch.float64, grad_fn=<...>)
lhs_target = 'v', rhs_target = 'u'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1.
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: v_sub_u) _____________________ test_sddmm[idtype1-sub-u-v-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.8155, 1.9733, 1.9601], [1.1673, 1.6327, 1.6664], [1.9783, 1.1182, 1.4094]]], ...9], [1.4658, 1.2297, 1.8897], [1.9914, 1.6260, 1.3477]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[-1.2463, -1.1755, -1.2695]], [[-1.8347, -1.3264, -1.2830]], [[-1.2136, -1.1117, -1.8637...1240, -1.0793, -1.6351]], [[-1.2996, -1.6086, -1.6366]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: v_sub_u) _____________________ test_sddmm[idtype1-sub-u-v-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.3369, 1.7815, 1.2619], [1.2740, 1.8385, 1.6761], [1.4766, 1.4518, 1.7626]]], ...6], [1.1029, 1.7361, 1.5116], [1.0777, 1.9530, 1.9462]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[-1.9952, -1.3456, -1.0052]], [[-1.3023, -1.6300, -1.0370]], [[-1.9797, -1.5283, -1.7989...7164, -1.3998, -1.9559]], [[-1.1417, -1.7327, -1.7614]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: v_sub_u) _____________________ test_sddmm[idtype1-sub-u-v-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.3548, 1.6724, 1.2639], [1.2232, 1.1428, 1.5033], [1.1367, 1.7010, 1.0270], [1.3830,... 1.3659], [1.6541, 1.0976, 1.9485], [1.4686, 1.5412, 1.2845]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.9622, -1.0297, -1.7694], [-1.5773, -1.8425, -1.8534], [-1.9259, -1.5715, -1.2766], ... [-1.8178, -1.4289, -1.1438], [-1.1341, -1.8094, -1.3500]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: v_sub_u) _____________________ test_sddmm[idtype1-sub-u-v-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.2998, 1.2458, 1.1229], [1.9411, 1.4997, 1.4892], [1.2512, 1.8789, 1.1003], [1.8518,... 1.4803], [1.7580, 1.4317, 1.2687], [1.4405, 1.8234, 1.1340]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.6268, -1.6755, -1.3902], [-1.7552, -1.3351, -1.2151], [-1.9100, -1.4768, -1.2976], ... [-1.0272, -1.9719, -1.6333], [-1.9035, -1.4353, -1.3766]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: v_sub_u) _____________________ test_sddmm[idtype1-sub-u-v-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.3567], [1.7589], [1.2408], [1.9908], [1.8823], [1.2824], [1...274], [1.6649], [1.3238], [1.9560], [1.0259]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.8992], [-1.0755], [-1.3164], [-1.1441], [-1.6808], [-1.7479], ... [-1.2042], [-1.5258], [-1.6347], [-1.1022]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: v_sub_u) _____________________ test_sddmm[idtype1-sub-u-v-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.4615], [1.6912], [1.4956], [1.4110], [1.6425], [1.9516], [1...956], [1.0059], [1.1095], [1.5681], [1.0757]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.6238], [-1.6586], [-1.1293], [-1.3257], [-1.9223], [-1.4091], ... [-1.7848], [-1.7967], [-1.3291], [-1.0773]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: v_sub_u) _____________________ test_sddmm[idtype1-sub-u-e-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.5645], [1.5317], [1.2214]]], [[[1.5989], [1.9585], ...1]]], [[[1.5152], [1.8388], [1.9746]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.3473]], [[-1.0661]], [[-1.8359]]]], [[[[-1.9997]], [[-... [[[[-1.9918]], [[-1.3307]], [[-1.7427]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype1-sub-u-e-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.1991], [1.0801], [1.5652]]], [[[1.1688], [1.3135], ...5]]], [[[1.5099], [1.2017], [1.2282]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.5831]], [[-1.9279]], [[-1.0280]]]], [[[[-1.4511]], [[-... [[[[-1.2122]], [[-1.2279]], [[-1.9157]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype1-sub-u-e-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.2415, 1.6276, 1.6331, ..., 1.2767, 1.4382, 1.4394]], [[1.6485, 1.0773, 1.3831, ..., 1.4612,... [[1.2428, 1.8404, 1.5045, ..., 1.8584, 1.6209, 1.5052]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[-1.6642, -1.3548, -1.7054, ..., -1.4920, -1.4596, -1.6399], [-1.4950, -1.1548, -1.9028, ..., ... [-1.8430, -1.1306, -1.7906, ..., -1.0242, -1.7503, -1.8562]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype1-sub-u-e-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.5050, 1.5502, 1.0872, ..., 1.6453, 1.5792, 1.4944]], [[1.4555, 1.8809, 1.4106, ..., 1.2840,... [[1.2984, 1.8286, 1.0988, ..., 1.9322, 1.8762, 1.4155]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[-1.2426, -1.7832, -1.8431, ..., -1.4781, -1.0629, -1.6853], [-1.5422, -1.9730, -1.8806, ..., ... [-1.1050, -1.9451, -1.4447, ..., -1.7610, -1.0656, -1.7471]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype1-sub-u-e-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.5235, 1.5122, 1.3541], [1.2758, 1.8799, 1.7244], [1.3875, 1.6798, 1.4945]]], ...1], [1.2899, 1.8673, 1.8388], [1.6174, 1.0995, 1.3016]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[-1.5328, -1.6652, -1.6618]], [[-1.9245, -1.7228, -1.1234]], [[-1.1985, -1.1065, -1.2348...2727, -1.8309, -1.6680]], [[-1.2651, -1.8905, -1.4274]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype1-sub-u-e-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.3092, 1.0459, 1.6709], [1.8223, 1.0758, 1.8456], [1.7942, 1.3586, 1.2927]]], ...1], [1.6282, 1.2192, 1.7107], [1.3478, 1.8580, 1.4100]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[-1.2527, -1.8022, -1.9185]], [[-1.4580, -1.3525, -1.8126]], [[-1.9765, -1.6055, -1.0420...3707, -1.5319, -1.8480]], [[-1.8877, -1.6211, -1.8737]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype1-sub-u-e-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.5291, 1.9632, 1.8953], [1.2442, 1.6081, 1.1752], [1.5645, 1.7444, 1.4179], [1.5383,... 1.9451], [1.0789, 1.9217, 1.2273], [1.5545, 1.0784, 1.3065]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.5602, -1.3264, -1.3103], [-1.0196, -1.5806, -1.8802], [-1.9131, -1.1947, -1.8721], ... [-1.2449, -1.9987, -1.6683], [-1.0360, -1.8585, -1.6127]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype1-sub-u-e-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.2152, 1.3666, 1.2668], [1.3625, 1.2008, 1.9899], [1.7475, 1.7672, 1.2782], [1.9977,... 1.9951], [1.5960, 1.6139, 1.9031], [1.5687, 1.9061, 1.9211]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.7142, -1.2973, -1.7765], [-1.2422, -1.1643, -1.2007], [-1.0732, -1.9983, -1.9500], ... [-1.6947, -1.9732, -1.6444], [-1.2798, -1.4979, -1.2885]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype1-sub-u-e-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.8600], [1.6807], [1.0031], [1.4744], [1.1900], [1.0911], [1...304], [1.7670], [1.0391], [1.4124], [1.4834]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.1282], [-1.0110], [-1.3812], [-1.9968], [-1.9724], [-1.6613], ... [-1.0716], [-1.2790], [-1.7579], [-1.5015]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype1-sub-u-e-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.2060], [1.7790], [1.3621], [1.4128], [1.6326], [1.0460], [1...888], [1.2157], [1.3723], [1.6692], [1.9861]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.1070], [-1.9880], [-1.9932], [-1.9291], [-1.9676], [-1.1745], ... [-1.9045], [-1.9337], [-1.9633], [-1.8735]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_sub_u) _____________________ test_sddmm[idtype1-sub-v-u-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.5469], [1.1286], [1.7467]]], [[[1.9825], [1.4169], ...0]]], [[[1.1366], [1.3316], [1.9732]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.0357]], [[-1.7126]], [[-1.9189]]]], [[[[-1.7302]], [[-... [[[[-1.2794]], [[-1.1734]], [[-1.9419]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: u_sub_v) _____________________ test_sddmm[idtype1-sub-v-u-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.9626], [1.8952], [1.0755]]], [[[1.3119], [1.3008], ...0]]], [[[1.3230], [1.0573], [1.4648]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.0666]], [[-1.0623]], [[-1.9948]]]], [[[[-1.7551]], [[-... [[[[-1.4088]], [[-1.5472]], [[-1.6737]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]) SDDMM(message func: u_sub_v) _____________________ test_sddmm[idtype1-sub-v-u-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.0916, 1.6394, 1.9499, ..., 1.7324, 1.0636, 1.7753]], [[1.1012, 1.3743, 1.8074, ..., 1.0851,... [[1.6344, 1.8453, 1.5683, ..., 1.5978, 1.4301, 1.5283]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[-1.9733, -1.6405, -1.8963, ..., -1.9427, -1.3973, -1.4310], [-1.1689, -1.3353, -1.0083, ..., ... [-1.0799, -1.5754, -1.5704, ..., -1.2054, -1.0211, -1.8702]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: u_sub_v) _____________________ test_sddmm[idtype1-sub-v-u-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.6782, 1.7972, 1.3928, ..., 1.3512, 1.3215, 1.7546]], [[1.4781, 1.7571, 1.7869, ..., 1.6011,... [[1.2183, 1.6723, 1.3355, ..., 1.1812, 1.2144, 1.4293]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[-1.3361, -1.0273, -1.8206, ..., -1.1408, -1.4483, -1.3585], [-1.5306, -1.7053, -1.4878, ..., ... [-1.5916, -1.9594, -1.4334, ..., -1.2051, -1.1727, -1.9359]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]) SDDMM(message func: u_sub_v) _____________________ test_sddmm[idtype1-sub-v-u-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.0145, 1.1180, 1.4647], [1.2802, 1.8468, 1.9549], [1.2540, 1.1403, 1.0721]]], ...1], [1.1646, 1.4387, 1.0399], [1.3295, 1.5346, 1.3773]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[-1.6538, -1.1548, -1.2536]], [[-1.9515, -1.3072, -1.4614]], [[-1.4512, -1.5941, -1.7223...7456, -1.4132, -1.4245]], [[-1.0464, -1.8621, -1.7405]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: u_sub_v) _____________________ test_sddmm[idtype1-sub-v-u-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.9611, 1.6974, 1.1459], [1.6558, 1.9671, 1.4280], [1.6449, 1.6687, 1.7912]]], ...3], [1.0329, 1.7177, 1.0228], [1.0800, 1.2412, 1.5783]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[-1.0484, -1.3139, -1.2293]], [[-1.3943, -1.3584, -1.6992]], [[-1.0724, -1.4368, -1.6966...6049, -1.3551, -1.1730]], [[-1.9240, -1.2171, -1.6515]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]) SDDMM(message func: u_sub_v) _____________________ test_sddmm[idtype1-sub-v-u-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.6724, 1.4218, 1.6506], [1.5510, 1.7182, 1.9379], [1.6494, 1.7588, 1.8375], [1.5015,... 1.4739], [1.2903, 1.8275, 1.9033], [1.4139, 1.7342, 1.0533]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.7669, -1.4504, -1.7368], [-1.1404, -1.7953, -1.3462], [-1.1395, -1.8815, -1.5919], ... [-1.5007, -1.6308, -1.5976], [-1.1261, -1.7114, -1.7212]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: u_sub_v) _____________________ test_sddmm[idtype1-sub-v-u-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.3160, 1.5149, 1.6110], [1.4937, 1.9326, 1.2580], [1.0883, 1.4589, 1.1909], [1.4441,... 1.7348], [1.2015, 1.8781, 1.4183], [1.0987, 1.8815, 1.5452]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.2148, -1.0419, -1.2661], [-1.5703, -1.3522, -1.2725], [-1.1222, -1.6309, -1.8740], ... [-1.9592, -1.8316, -1.0700], [-1.0258, -1.6824, -1.5050]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3]) SDDMM(message func: u_sub_v) _____________________ test_sddmm[idtype1-sub-v-u-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.3684], [1.7203], [1.6244], [1.5457], [1.1011], [1.6327], [1...188], [1.1351], [1.9788], [1.8242], [1.5584]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.2757], [-1.3728], [-1.3119], [-1.1470], [-1.8669], [-1.5417], ... [-1.3810], [-1.9403], [-1.8139], [-1.6507]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: u_sub_v) _____________________ test_sddmm[idtype1-sub-v-u-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.9255], [1.7186], [1.4539], [1.9214], [1.6388], [1.8467], [1...861], [1.9113], [1.7273], [1.4800], [1.2690]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.7849], [-1.9287], [-1.1195], [-1.0549], [-1.1682], [-1.4818], ... [-1.5995], [-1.7929], [-1.6663], [-1.6964]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: u_sub_v) _____________________ test_sddmm[idtype1-sub-v-e-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.2046], [1.8173], [1.3427]]], [[[1.2721], [1.7787], ...5]]], [[[1.3389], [1.2144], [1.8694]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.0346]], [[-1.2330]], [[-1.6298]]]], [[[[-1.9067]], [[-... [[[[-1.9807]], [[-1.4770]], [[-1.0401]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_sub_v) _____________________ test_sddmm[idtype1-sub-v-e-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[[1.5669], [1.2279], [1.4558]]], [[[1.4621], [1.2711], ...6]]], [[[1.7846], [1.4215], [1.5848]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[-1.9462]], [[-1.9526]], [[-1.5376]]]], [[[[-1.1777]], [[-... [[[[-1.6566]], [[-1.6652]], [[-1.1678]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]) SDDMM(message func: e_sub_v) _____________________ test_sddmm[idtype1-sub-v-e-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.6682, 1.4507, 1.0613, ..., 1.7389, 1.2368, 1.8955]], [[1.3401, 1.7821, 1.1081, ..., 1.8836,... [[1.2306, 1.6741, 1.9921, ..., 1.9746, 1.5470, 1.2583]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[-1.5709, -1.6898, -1.6861, ..., -1.9487, -1.3783, -1.7146], [-1.8861, -1.0851, -1.6838, ..., ... [-1.4777, -1.9619, -1.9812, ..., -1.5858, -1.3032, -1.9006]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_sub_v) _____________________ test_sddmm[idtype1-sub-v-e-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'sub', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[[1.3356, 1.1504, 1.5126, ..., 1.6765, 1.2137, 1.3489]], [[1.6461, 1.9470, 1.8173, ..., 1.8891,... [[1.8952, 1.7025, 1.1815, ..., 1.5108, 1.3279, 1.9652]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[-1.2399, -1.1078, -1.0195, ..., -1.9529, -1.3178, -1.7421], [-1.6048, -1.0769, -1.4265, ..., ... [-1.7443, -1.7579, -1.9766, ..., -1.6016, -1.8456, -1.3396]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]) SDDMM(message func: e_sub_v) _____________________ test_sddmm[idtype1-sub-v-e-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.0810, 1.0268, 1.5138], [1.1740, 1.2205, 1.1645], [1.0390, 1.7951, 1.4311]]], ...9], [1.9239, 1.2098, 1.2818], [1.5400, 1.6357, 1.3263]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[-1.5953, -1.5838, -1.1075]], [[-1.0709, -1.2633, -1.6062]], [[-1.6404, -1.8070, -1.7814...5158, -1.0803, -1.2692]], [[-1.6735, -1.3252, -1.8462]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_sub_v) _____________________ test_sddmm[idtype1-sub-v-e-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.4402, 1.9725, 1.2595], [1.0845, 1.7408, 1.1711], [1.1753, 1.0306, 1.9043]]], ...3], [1.2083, 1.9528, 1.9350], [1.8106, 1.4528, 1.0789]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[-1.2812, -1.8389, -1.8139]], [[-1.7799, -1.6884, -1.7673]], [[-1.3090, -1.5301, -1.7467...7515, -1.1713, -1.6013]], [[-1.6686, -1.1697, -1.4757]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
The next four e_sub_v parametrizations fail at the same guard
(python/dgl/backend/pytorch/sparse.py:731, TypeError: empty_context() got an
unexpected keyword argument 'enabled'), with the same test body and traceback;
their captured stdout differs only in the operand shapes:

_____________________ test_sddmm[idtype1-sub-v-e-shp3-g0] ______________________
lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3])
_____________________ test_sddmm[idtype1-sub-v-e-shp3-g1] ______________________
lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3])
_____________________ test_sddmm[idtype1-sub-v-e-shp4-g0] ______________________
lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1])
_____________________ test_sddmm[idtype1-sub-v-e-shp4-g1] ______________________
lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([40, 1])
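The gsddmm body echoed in each traceback lowers 'sub' and 'div' before dispatching: subtraction becomes addition of a negated operand and division becomes multiplication by a reciprocal, which is why every e_sub_v failure arrives at the autocast guard with op = 'add'. A standalone sketch of that lowering on plain tensors; the helper name lower_sub_div is hypothetical:

    import torch

    def lower_sub_div(op, rhs_data):
        # Mirrors the rewriting visible in gsddmm's traceback above: once
        # sub/div are rewritten on the rhs operand, the backend only needs
        # add/mul kernels for these binary ops.
        if op == 'sub':
            return 'add', -rhs_data
        if op == 'div':
            return 'mul', 1. / rhs_data
        return op, rhs_data

    op, rhs = lower_sub_div('sub', torch.rand(4, 3) + 1)
    assert op == 'add' and bool((rhs < 0).all())  # operand negated, op lowered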
All ten u_sub_e parametrizations fail identically (same test body, same
TypeError at python/dgl/backend/pytorch/sparse.py:731); operand shapes per case:

_____________________ test_sddmm[idtype1-sub-e-u-shp0-g0] ______________________
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1])
_____________________ test_sddmm[idtype1-sub-e-u-shp0-g1] ______________________
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1])
_____________________ test_sddmm[idtype1-sub-e-u-shp1-g0] ______________________
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7])
_____________________ test_sddmm[idtype1-sub-e-u-shp1-g1] ______________________
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7])
_____________________ test_sddmm[idtype1-sub-e-u-shp2-g0] ______________________
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3])
_____________________ test_sddmm[idtype1-sub-e-u-shp2-g1] ______________________
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3])
_____________________ test_sddmm[idtype1-sub-e-u-shp3-g0] ______________________
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3])
_____________________ test_sddmm[idtype1-sub-e-u-shp3-g1] ______________________
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([300, 3])
_____________________ test_sddmm[idtype1-sub-e-u-shp4-g0] ______________________
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1])
_____________________ test_sddmm[idtype1-sub-e-u-shp4-g1] ______________________
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([300, 1])
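The shape pairs above differ in their leading dimension because one operand is per-node and the other per-edge; the trailing feature dimensions are expected to broadcast NumPy-style. The sketch below checks one of the u_sub_e pairs with plain torch; that SDDMM broadcasts trailing feature dimensions this way is an assumption consistent with the printed shapes, not a statement about DGL's kernels:

    import torch

    # Feature shapes from the shp2 u_sub_e cases, with the leading
    # node/edge dimension dropped.
    lhs_feat = torch.Size([1, 3, 3])   # per-source-node feature shape
    rhs_feat = torch.Size([4, 1, 3])   # per-edge feature shape
    print(torch.broadcast_shapes(lhs_feat, rhs_feat))  # torch.Size([4, 3, 3])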
The v_sub_e parametrizations continue the same pattern (same test body, same
TypeError at python/dgl/backend/pytorch/sparse.py:731); operand shapes per case:

_____________________ test_sddmm[idtype1-sub-e-v-shp0-g0] ______________________
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1])
_____________________ test_sddmm[idtype1-sub-e-v-shp0-g1] ______________________
lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1])
_____________________ test_sddmm[idtype1-sub-e-v-shp1-g0] ______________________
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7])
_____________________ test_sddmm[idtype1-sub-e-v-shp1-g1] ______________________
lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7])
_____________________ test_sddmm[idtype1-sub-e-v-shp2-g0] ______________________
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3])
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype1-sub-e-v-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[[[1.6281, 1.9924, 1.6818], [1.1010, 1.7657, 1.3831], [1.6821, 1.5664, 1.9120]]], ...7], [1.9046, 1.4928, 1.8350], [1.7803, 1.1957, 1.4271]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[-1.3609, -1.0262, -1.5249]], [[-1.0272, -1.8908, -1.1984]], [[-1.0240, -1.9879, -1.8948...8123, -1.0174, -1.7080]], [[-1.3416, -1.2324, -1.3625]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype1-sub-e-v-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.6483, 1.4758, 1.7637], [1.4423, 1.4533, 1.6903], [1.8065, 1.1851, 1.8254], [1.6056,... 1.1103], [1.9560, 1.8169, 1.2261], [1.9788, 1.9089, 1.4076]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.7707, -1.0283, -1.0313], [-1.8328, -1.3360, -1.2937], [-1.5996, -1.8474, -1.1870], ... [-1.9474, -1.8660, -1.1462], [-1.3228, -1.1862, -1.0976]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype1-sub-e-v-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.0714, 1.6349, 1.6321], [1.8641, 1.0605, 1.5391], [1.5364, 1.5679, 1.8272], [1.5644,... 1.4511], [1.9220, 1.9167, 1.7309], [1.3592, 1.1486, 1.9225]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.1548, -1.6136, -1.8671], [-1.2796, -1.3754, -1.7014], [-1.0747, -1.4451, -1.7041], ... [-1.8445, -1.5779, -1.5459], [-1.8664, -1.0707, -1.5644]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([300, 3]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype1-sub-e-v-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.6762], [1.1178], [1.0401], [1.5047], [1.2785], [1.5639], [1...310], [1.7694], [1.2187], [1.0044], [1.0395]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.6668], [-1.9723], [-1.6517], [-1.2654], [-1.8268], [-1.4945], ... [-1.7611], [-1.4391], [-1.4630], [-1.8685]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1]) SDDMM(message func: v_sub_e) _____________________ test_sddmm[idtype1-sub-e-v-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'sub' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'add' lhs_data = tensor([[1.7641], [1.6165], [1.8629], [1.7053], [1.8771], [1.8627], [1...323], [1.1062], [1.3414], [1.8614], [1.8025]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[-1.5352], [-1.8544], [-1.0516], [-1.7146], [-1.8005], [-1.0210], ... [-1.0909], [-1.3832], [-1.6541], [-1.2847]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
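One detail worth noting before the next block: the captured locals show op = 'add' even though these parametrizations run with msg = 'sub'. That is gsddmm itself, visible at the bottom of each traceback: it canonicalizes subtraction and division into addition and multiplication (u - e == u + (-e), u / e == u * (1/e)) before dispatching, which is also why rhs_data appears negated and carries a grad_fn. A standalone sketch of that rewrite (the function name here is illustrative, not DGL API):

    import torch

    def canonicalize(op, rhs):
        # Fold 'sub'/'div' into the commutative ops, mirroring the branch
        # at the top of gsddmm in python/dgl/backend/pytorch/sparse.py.
        if op == 'sub':
            return 'add', -rhs
        if op == 'div':
            return 'mul', 1. / rhs
        return op, rhs

    rhs = torch.rand(4) + 1                # strictly positive, like the test features
    op, new_rhs = canonicalize('sub', rhs)
    assert op == 'add' and torch.equal(new_rhs, -rhs)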
The next block, v_mul_u (lhs_target = 'v', rhs_target = 'u', msg = 'mul'), fails the same way: every case reaches gsddmm, keeps op = 'mul' unchanged (both operands stay positive with requires_grad=True), prints SDDMM(message func: v_mul_u), and dies at python/dgl/backend/pytorch/sparse.py:731 with

E   TypeError: empty_context() got an unexpected keyword argument 'enabled'

test_sddmm[idtype1-mul-u-v-shp0-g0]  shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1))  lhs torch.Size([30, 1, 2, 1, 3, 1])  rhs torch.Size([30, 4, 1, 3, 1, 1])
test_sddmm[idtype1-mul-u-v-shp0-g1]  shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1))  lhs torch.Size([40, 1, 2, 1, 3, 1])  rhs torch.Size([30, 4, 1, 3, 1, 1])
test_sddmm[idtype1-mul-u-v-shp1-g0]  shp = ((5, 3, 1, 7), (1, 3, 7, 7))        lhs torch.Size([30, 5, 3, 1, 7])     rhs torch.Size([30, 1, 3, 7, 7])
test_sddmm[idtype1-mul-u-v-shp1-g1]  shp = ((5, 3, 1, 7), (1, 3, 7, 7))        lhs torch.Size([40, 5, 3, 1, 7])     rhs torch.Size([30, 1, 3, 7, 7])
test_sddmm[idtype1-mul-u-v-shp2-g0]  shp = ((1, 3, 3), (4, 1, 3))              lhs torch.Size([30, 1, 3, 3])        rhs torch.Size([30, 4, 1, 3])
test_sddmm[idtype1-mul-u-v-shp2-g1]  shp = ((1, 3, 3), (4, 1, 3))              lhs torch.Size([40, 1, 3, 3])        rhs torch.Size([30, 4, 1, 3])
test_sddmm[idtype1-mul-u-v-shp3-g0]  shp = ((3,), (3,))                        lhs torch.Size([30, 3])              rhs torch.Size([30, 3])
test_sddmm[idtype1-mul-u-v-shp3-g1]  shp = ((3,), (3,))                        lhs torch.Size([40, 3])              rhs torch.Size([30, 3])
test_sddmm[idtype1-mul-u-v-shp4-g0]  shp = ((1,), (1,))                        lhs torch.Size([30, 1])              rhs torch.Size([30, 1])
test_sddmm[idtype1-mul-u-v-shp4-g1]  shp = ((1,), (1,))                        lhs torch.Size([40, 1])              rhs torch.Size([30, 1])
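Every failure in this section is the same one-line problem, visible in the last frame of each traceback: python/dgl/backend/pytorch/sparse.py:731 enters `with autocast(enabled=False):`, but in this environment `autocast` is bound to a no-argument fallback named empty_context, so passing enabled=False raises the TypeError. The snippet below is a minimal sketch of the failure mode and of one kwargs-tolerant fallback; it is illustrative only and not necessarily the change under review in this PR (titled "fix for pytorch < 1.12") — the import guard and names here are assumptions.

    import contextlib

    # Reproduce the failure mode: a zero-argument context manager called the
    # way torch's autocast is called. contextmanager invokes the generator
    # function eagerly, so the unexpected keyword raises at the call site.
    @contextlib.contextmanager
    def empty_context():
        yield

    try:
        with empty_context(enabled=False):
            pass
    except TypeError as e:
        print(e)  # empty_context() got an unexpected keyword argument 'enabled'

    # One possible fix: make the fallback swallow autocast's arguments.
    try:
        from torch.cuda.amp import autocast   # real autocast when the build has amp
    except ImportError:
        @contextlib.contextmanager
        def autocast(enabled=True, **kwargs):
            # No-op stand-in so call sites like `with autocast(enabled=False):`
            # keep working on PyTorch builds without amp support.
            yield

With a fallback like this, the call at sparse.py:731 works on either code path without a version check at every call site.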
The run then moves on to the e_mul_u block (lhs_target = 'e', rhs_target = 'u', msg = 'mul'), where the lhs operand is edge-sized, and the failure is unchanged; both cases print SDDMM(message func: e_mul_u) and raise the same TypeError at python/dgl/backend/pytorch/sparse.py:731:

test_sddmm[idtype1-mul-u-e-shp0-g0]  shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1))  lhs torch.Size([100, 1, 2, 1, 3, 1])  rhs torch.Size([30, 4, 1, 3, 1, 1])
test_sddmm[idtype1-mul-u-e-shp0-g1]  shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1))  lhs torch.Size([300, 1, 2, 1, 3, 1])  rhs torch.Size([30, 4, 1, 3, 1, 1])
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_mul_u) _____________________ test_sddmm[idtype1-mul-u-e-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u' msg = 'mul', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.3991, 1.0989, 1.1444, ..., 1.6576, 1.9765, 1.5259]], [[1.2263, 1.7218, 1.7377, ..., 1.0352,... [[1.8267, 1.7683, 1.7574, ..., 1.5849, 1.2964, 1.5571]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.2335, 1.4929, 1.1598, ..., 1.9702, 1.1826, 1.5263], [1.9596, 1.9163, 1.3799, ..., 1.3414, 1... [1.9004, 1.7020, 1.3166, ..., 1.6751, 1.1374, 1.3156]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_mul_u) _____________________ test_sddmm[idtype1-mul-u-e-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u' msg = 'mul', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.8714, 1.8575, 1.0125, ..., 1.5100, 1.6216, 1.2561]], [[1.8130, 1.8588, 1.2249, ..., 1.0622,... [[1.0455, 1.1080, 1.0686, ..., 1.1299, 1.5899, 1.4423]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.6262, 1.0981, 1.1848, ..., 1.8794, 1.7959, 1.2538], [1.4129, 1.3388, 1.9704, ..., 1.7765, 1... 
[1.6579, 1.7841, 1.9555, ..., 1.1068, 1.2123, 1.6847]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_mul_u) _____________________ test_sddmm[idtype1-mul-u-e-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.2268, 1.5672, 1.5478], [1.2674, 1.9871, 1.0027], [1.5210, 1.2163, 1.8255]]], ...8], [1.6756, 1.7368, 1.3513], [1.1281, 1.5607, 1.4968]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.9942, 1.4701, 1.7212]], [[1.0654, 1.4718, 1.0318]], [[1.7912, 1.0175, 1.9801]], ... 
[[1.5640, 1.6373, 1.1098]], [[1.2129, 1.5409, 1.6227]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_mul_u) _____________________ test_sddmm[idtype1-mul-u-e-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.8835, 1.5106, 1.8899], [1.4797, 1.7410, 1.6716], [1.9968, 1.8038, 1.7091]]], ...5], [1.9241, 1.9951, 1.3449], [1.7242, 1.8281, 1.0594]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.2986, 1.3706, 1.5430]], [[1.7057, 1.7337, 1.1026]], [[1.8655, 1.6313, 1.7918]], ... 
[[1.3972, 1.9239, 1.4840]], [[1.0105, 1.9309, 1.0577]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_mul_u) _____________________ test_sddmm[idtype1-mul-u-e-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.9786, 1.7374, 1.9428], [1.5016, 1.8362, 1.6301], [1.5201, 1.1490, 1.1774], [1.5926,... 1.9630], [1.0444, 1.1395, 1.7414], [1.1903, 1.3424, 1.0212]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7885, 1.1472, 1.7614], [1.9835, 1.6607, 1.3032], [1.8130, 1.4084, 1.1771], [1.1351,... 
1.6904], [1.2697, 1.9368, 1.9555], [1.1745, 1.2780, 1.5579]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_mul_u) _____________________ test_sddmm[idtype1-mul-u-e-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.6067, 1.6205, 1.6731], [1.4293, 1.2572, 1.5483], [1.9376, 1.0875, 1.8784], [1.4495,... 1.0222], [1.8783, 1.9627, 1.8379], [1.6664, 1.8075, 1.4426]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6854, 1.3732, 1.9222], [1.3698, 1.5666, 1.7152], [1.0346, 1.6259, 1.4296], [1.1183,... 
1.2803], [1.1913, 1.6895, 1.0111], [1.3378, 1.4486, 1.0581]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_mul_u) _____________________ test_sddmm[idtype1-mul-u-e-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.4782], [1.4866], [1.8678], [1.4174], [1.9703], [1.6101], [1...451], [1.9476], [1.5639], [1.4552], [1.6073]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6875], [1.1658], [1.5206], [1.1277], [1.0370], [1.4307], [1...986], [1.2311], [1.1092], [1.4202], [1.3762]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', 
rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_mul_u) _____________________ test_sddmm[idtype1-mul-u-e-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.0902], [1.6350], [1.1043], [1.8910], [1.0808], [1.4964], [1...753], [1.9690], [1.2696], [1.9771], [1.1422]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6844], [1.9944], [1.8537], [1.8263], [1.1202], [1.2829], [1...249], [1.4571], [1.6680], [1.0194], [1.9979]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_mul_u) _____________________ test_sddmm[idtype1-mul-v-u-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'mul', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.8668], [1.4754], [1.9082]]], [[[1.6112], [1.1442], ...2]]], [[[1.3700], [1.1320], [1.2164]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.4848]], [[1.5027]], [[1.4367]]]], [[[[1.9293]], [[1.392... [[[[1.2344]], [[1.1072]], [[1.8930]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype1-mul-v-u-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'mul', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.8950], [1.2756], [1.3521]]], [[[1.9388], [1.6291], ...8]]], [[[1.4780], [1.8980], [1.9817]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.4746]], [[1.4285]], [[1.8713]]]], [[[[1.2079]], [[1.639... [[[[1.8496]], [[1.2931]], [[1.9952]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype1-mul-v-u-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v' msg = 'mul', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.9236, 1.0743, 1.4444, ..., 1.3077, 1.6208, 1.6680]], [[1.4552, 1.7461, 1.9188, ..., 1.3765,... [[1.6586, 1.0983, 1.8119, ..., 1.1361, 1.6990, 1.0830]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.5830, 1.0039, 1.1217, ..., 1.9640, 1.4453, 1.0192], [1.5297, 1.0367, 1.8214, ..., 1.4523, 1... [1.2861, 1.4303, 1.7729, ..., 1.7188, 1.0989, 1.1608]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype1-mul-v-u-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v' msg = 'mul', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.9709, 1.8779, 1.1597, ..., 1.7810, 1.7763, 1.1718]], [[1.0849, 1.8135, 1.9027, ..., 1.8702,... [[1.5438, 1.9210, 1.9572, ..., 1.9573, 1.7276, 1.1259]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.0109, 1.1547, 1.2643, ..., 1.2379, 1.7375, 1.7215], [1.3536, 1.0327, 1.9459, ..., 1.1945, 1... 
[1.5211, 1.0722, 1.1555, ..., 1.6975, 1.4597, 1.4347]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype1-mul-v-u-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.1271, 1.4449, 1.7561], [1.8270, 1.5435, 1.1492], [1.9740, 1.7289, 1.2275]]], ...2], [1.0359, 1.9441, 1.7003], [1.0783, 1.9920, 1.6230]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.5094, 1.5366, 1.3000]], [[1.4286, 1.6809, 1.4311]], [[1.2286, 1.3748, 1.8456]], ... 
[[1.6538, 1.5953, 1.2834]], [[1.8598, 1.7512, 1.0290]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype1-mul-v-u-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.6562, 1.9572, 1.6798], [1.8059, 1.6668, 1.3412], [1.7000, 1.0931, 1.2953]]], ...4], [1.1994, 1.8019, 1.1896], [1.6033, 1.5100, 1.3070]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.8781, 1.5960, 1.4969]], [[1.0821, 1.2513, 1.0302]], [[1.8768, 1.8967, 1.1073]], ... 
[[1.7913, 1.8275, 1.9413]], [[1.0196, 1.0055, 1.7099]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype1-mul-v-u-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.0708, 1.4712, 1.0718], [1.4940, 1.1141, 1.3583], [1.1039, 1.6606, 1.5475], [1.0872,... 1.9240], [1.4698, 1.2142, 1.2348], [1.7228, 1.7563, 1.7187]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6200, 1.0850, 1.2068], [1.1036, 1.6878, 1.7269], [1.8639, 1.5205, 1.4895], [1.5343,... 
1.0668], [1.2700, 1.2286, 1.7354], [1.1518, 1.9806, 1.8310]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: u_mul_v) _____________________ test_sddmm[idtype1-mul-v-u-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.8526, 1.8628, 1.3842], [1.3539, 1.4571, 1.0369], [1.7153, 1.7374, 1.2340], [1.5772,... 1.9098], [1.0316, 1.5976, 1.6375], [1.1613, 1.3665, 1.3235]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7204, 1.7122, 1.6627], [1.4033, 1.9065, 1.1243], [1.2757, 1.8863, 1.8481], [1.9902,... 
The remaining mul parametrizations fail identically: the same TypeError at python/dgl/backend/pytorch/sparse.py:731, reached through tests/compute/test_sparse.py:207 and python/dgl/ops/sddmm.py:75, differing only in the parametrization and in the shapes printed to stdout. Throughout, idtype = torch.int64; g0 denotes Graph(num_nodes=30, num_edges=100) and g1 denotes Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]).

test_sddmm[idtype1-mul-v-u-shp3-g1]: g1; shp = ((3,), (3,)); lhs_target = 'u', rhs_target = 'v'; lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3]); SDDMM(message func: u_mul_v)
test_sddmm[idtype1-mul-v-u-shp4-g0]: g0; shp = ((1,), (1,)); lhs_target = 'u', rhs_target = 'v'; lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1]); SDDMM(message func: u_mul_v)
test_sddmm[idtype1-mul-v-u-shp4-g1]: g1; shp = ((1,), (1,)); lhs_target = 'u', rhs_target = 'v'; lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1]); SDDMM(message func: u_mul_v)
test_sddmm[idtype1-mul-v-e-shp0-g0]: g0; shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)); lhs_target = 'e', rhs_target = 'v'; lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]); SDDMM(message func: e_mul_v)
test_sddmm[idtype1-mul-v-e-shp0-g1]: g1; shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)); lhs_target = 'e', rhs_target = 'v'; lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]); SDDMM(message func: e_mul_v)
test_sddmm[idtype1-mul-v-e-shp1-g0]: g0; shp = ((5, 3, 1, 7), (1, 3, 7, 7)); lhs_target = 'e', rhs_target = 'v'; lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]); SDDMM(message func: e_mul_v)
test_sddmm[idtype1-mul-v-e-shp1-g1]: g1; shp = ((5, 3, 1, 7), (1, 3, 7, 7)); lhs_target = 'e', rhs_target = 'v'; lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]); SDDMM(message func: e_mul_v)
test_sddmm[idtype1-mul-v-e-shp2-g0]: g0; shp = ((1, 3, 3), (4, 1, 3)); lhs_target = 'e', rhs_target = 'v'; lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]); SDDMM(message func: e_mul_v)
test_sddmm[idtype1-mul-v-e-shp2-g1]: g1; shp = ((1, 3, 3), (4, 1, 3)); lhs_target = 'e', rhs_target = 'v'; lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]); SDDMM(message func: e_mul_v)
test_sddmm[idtype1-mul-v-e-shp3-g0]: g0; shp = ((3,), (3,)); lhs_target = 'e', rhs_target = 'v'; lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]); SDDMM(message func: e_mul_v)
test_sddmm[idtype1-mul-v-e-shp3-g1]: g1; shp = ((3,), (3,)); lhs_target = 'e', rhs_target = 'v'; lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3]); SDDMM(message func: e_mul_v)
test_sddmm[idtype1-mul-v-e-shp4-g0]: g0; shp = ((1,), (1,)); lhs_target = 'e', rhs_target = 'v'; lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]); SDDMM(message func: e_mul_v)
test_sddmm[idtype1-mul-v-e-shp4-g1]: g1; shp = ((1,), (1,)); lhs_target = 'e', rhs_target = 'v'; lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([40, 1]); SDDMM(message func: e_mul_v)
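A side note on the gsddmm body repeated in every traceback: before dispatch it canonicalizes the operator, rewriting 'sub' as 'add' on a negated right-hand side and 'div' as 'mul' on a reciprocal one, so only the add/mul kernels need the autocast handling at all. A quick standalone check of the elementwise identities this rewrite relies on (plain torch, independent of the failure):

import torch

x = torch.tensor([4.0, 9.0, 16.0], dtype=torch.float64)
y = torch.tensor([2.0, 3.0, 4.0], dtype=torch.float64)

# 'sub' becomes 'add' with a negated rhs; 'div' becomes 'mul' with a
# reciprocal rhs. Elementwise, both are exact rewrites:
assert torch.allclose(x - y, x + (-y))
assert torch.allclose(x / y, x * (1.0 / y))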
The u_mul_e parametrizations fail the same way:

test_sddmm[idtype1-mul-e-u-shp0-g0]: g0; shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)); lhs_target = 'u', rhs_target = 'e'; lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]); SDDMM(message func: u_mul_e)

Captured stdout for this case also carries interleaved distributed-client output:

Client [1855] waits on 172.17.0.3:58773
Machine (0) group (0) client (0) connect to server successfuly!
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
NumNodes: 2708
NumEdges: 10556
NumFeats: 1433
NumClasses: 7
NumTrainingSamples: 140
NumValidationSamples: 500
NumTestSamples: 1000
Done loading data from cached files.
NumNodes: 2708
NumEdges: 10556
NumFeats: 1433
NumClasses: 7
NumTrainingSamples: 140
NumValidationSamples: 500
NumTestSamples: 1000
Done loading data from cached files.
Client[0] in group[0] is exiting...
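The DGLWarning above is unrelated to the sddmm failures but actionable on its own: string edge types are deprecated in favor of canonical (src_type, edge_type, dst_type) triples. A hypothetical illustration of the two spellings (the toy graph and its type names are invented; only the API shape matters):

import dgl
import torch

# A toy heterograph with a single relation.
g = dgl.heterograph({
    ('user', 'follows', 'user'): (torch.tensor([0, 1]), torch.tensor([1, 2])),
})

# Deprecated: addressing the relation by the bare string 'follows'.
# Preferred: the canonical (src_type, edge_type, dst_type) triple.
print(g.num_edges(('user', 'follows', 'user')))  # 2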
The remaining u_mul_e cases:

test_sddmm[idtype1-mul-e-u-shp0-g1]: g1; shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)); lhs_target = 'u', rhs_target = 'e'; lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]); SDDMM(message func: u_mul_e)
test_sddmm[idtype1-mul-e-u-shp1-g0]: g0; shp = ((5, 3, 1, 7), (1, 3, 7, 7)); lhs_target = 'u', rhs_target = 'e'; lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]); SDDMM(message func: u_mul_e)
test_sddmm[idtype1-mul-e-u-shp1-g1]: g1; shp = ((5, 3, 1, 7), (1, 3, 7, 7)); lhs_target = 'u', rhs_target = 'e'; lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]); SDDMM(message func: u_mul_e)
test_sddmm[idtype1-mul-e-u-shp2-g0]: g0; shp = ((1, 3, 3), (4, 1, 3)); lhs_target = 'u', rhs_target = 'e'; lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]); SDDMM(message func: u_mul_e)
test_sddmm[idtype1-mul-e-u-shp2-g1]: g1; shp = ((1, 3, 3), (4, 1, 3)); lhs_target = 'u', rhs_target = 'e'; lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]); SDDMM(message func: u_mul_e)
_____________________ test_sddmm[idtype1-mul-e-u-shp3-g0] ______________________

g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)})
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'mul'
idtype = torch.int64

        with F.record_grad():
>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'mul'
lhs_data = tensor([[1.5889, 1.1640, 1.6539],
        [1.9048, 1.0412, 1.7000],
        [1.8646, 1.2115, 1.9822],
        [1.5114, ... 1.8914],
        [1.1304, 1.8902, 1.9334],
        [1.6455, 1.5278, 1.9543]], dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[1.9642, 1.0704, 1.2753],
        [1.9871, 1.8235, 1.0788],
        [1.9160, 1.7704, 1.7115],
        [1.9959,
1.6998], [1.3684, 1.5637, 1.9469], [1.6608, 1.2065, 1.9630]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3]) SDDMM(message func: u_mul_e) _____________________ test_sddmm[idtype1-mul-e-u-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.4713, 1.7156, 1.3728], [1.9633, 1.0473, 1.6408], [1.2661, 1.0489, 1.1762], [1.9925,... 1.7833], [1.2893, 1.3998, 1.0311], [1.2190, 1.8728, 1.3518]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9493, 1.3654, 1.1701], [1.2903, 1.3318, 1.5707], [1.9637, 1.5268, 1.5765], [1.7259,... 
1.1955], [1.0118, 1.6856, 1.4724], [1.7778, 1.2957, 1.1057]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([300, 3]) SDDMM(message func: u_mul_e) _____________________ test_sddmm[idtype1-mul-e-u-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.5438], [1.7096], [1.8274], [1.3256], [1.5151], [1.9734], [1...713], [1.3038], [1.1269], [1.4991], [1.6111]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3976], [1.0791], [1.5968], [1.7456], [1.1439], [1.3560], [1...848], [1.6299], [1.0889], [1.7709], [1.0695]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', 
rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1]) SDDMM(message func: u_mul_e) _____________________ test_sddmm[idtype1-mul-e-u-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.0825], [1.5248], [1.1247], [1.4179], [1.7251], [1.7557], [1...816], [1.6505], [1.6945], [1.1515], [1.4240]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5619], [1.4914], [1.4919], [1.9564], [1.3221], [1.0574], [1...358], [1.8597], [1.0817], [1.4238], [1.0869]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([300, 1]) SDDMM(message func: u_mul_e) _____________________ test_sddmm[idtype1-mul-e-v-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'mul', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.8584], [1.4760], [1.0941]]], [[[1.3584], [1.4544], ...0]]], [[[1.1913], [1.9811], [1.3405]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.7535]], [[1.7347]], [[1.7556]]]], [[[[1.9471]], [[1.410... [[[[1.2798]], [[1.4416]], [[1.6569]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype1-mul-e-v-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'mul', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.4562], [1.8866], [1.1487]]], [[[1.4358], [1.9730], ...4]]], [[[1.4149], [1.3870], [1.4143]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.0029]], [[1.6133]], [[1.4935]]]], [[[[1.7785]], [[1.714... [[[[1.2694]], [[1.9672]], [[1.8345]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype1-mul-e-v-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'mul', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.2431, 1.6259, 1.3401, ..., 1.6003, 1.3444, 1.9827]], [[1.1797, 1.0830, 1.5008, ..., 1.3889,... [[1.9644, 1.8727, 1.7211, ..., 1.4322, 1.2887, 1.2948]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.6684, 1.5015, 1.8387, ..., 1.0918, 1.3181, 1.2764], [1.0682, 1.9373, 1.8768, ..., 1.2729, 1... [1.8431, 1.0173, 1.9516, ..., 1.3804, 1.1138, 1.2195]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype1-mul-e-v-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'mul', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.1000, 1.9243, 1.5685, ..., 1.8226, 1.1347, 1.5372]], [[1.5681, 1.9130, 1.1376, ..., 1.4817,... [[1.3281, 1.7770, 1.5365, ..., 1.3003, 1.3167, 1.3647]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.7397, 1.2011, 1.5537, ..., 1.8968, 1.5075, 1.5036], [1.4125, 1.6760, 1.6734, ..., 1.9716, 1... 
[1.8936, 1.8885, 1.6009, ..., 1.8860, 1.7184, 1.5022]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype1-mul-e-v-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.7272, 1.8152, 1.8586], [1.3600, 1.4937, 1.1282], [1.3696, 1.9875, 1.1373]]], ...2], [1.0375, 1.1773, 1.0199], [1.5049, 1.5845, 1.4399]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.1519, 1.4474, 1.8998]], [[1.7206, 1.5584, 1.6795]], [[1.5041, 1.4939, 1.4070]], ... 
[[1.6481, 1.8856, 1.1983]], [[1.2620, 1.1034, 1.6047]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype1-mul-e-v-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.1652, 1.9152, 1.2894], [1.2694, 1.3219, 1.2988], [1.4443, 1.5157, 1.2813]]], ...2], [1.3920, 1.8013, 1.1823], [1.8857, 1.0490, 1.6126]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.3963, 1.3454, 1.0572]], [[1.1330, 1.6814, 1.2676]], [[1.5073, 1.0279, 1.5653]], ... 
[[1.9617, 1.2057, 1.0059]], [[1.6312, 1.4347, 1.6278]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype1-mul-e-v-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.2438, 1.8076, 1.5272], [1.4223, 1.8100, 1.8955], [1.3991, 1.7024, 1.3753], [1.5545,... 1.0709], [1.8536, 1.0304, 1.2993], [1.6183, 1.7798, 1.4196]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1269, 1.5455, 1.5097], [1.9961, 1.1527, 1.5716], [1.1510, 1.2879, 1.4670], [1.3711,... 
1.0947], [1.1198, 1.9452, 1.2436], [1.0655, 1.0889, 1.9567]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype1-mul-e-v-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.6105, 1.0824, 1.5420], [1.9821, 1.8779, 1.0206], [1.8394, 1.2962, 1.3744], [1.0116,... 1.4157], [1.3231, 1.0906, 1.5152], [1.2331, 1.3382, 1.2811]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.4688, 1.6436, 1.4887], [1.4417, 1.3062, 1.0146], [1.4402, 1.9420, 1.2522], [1.5830,... 
1.9011], [1.2394, 1.1699, 1.2997], [1.1173, 1.2023, 1.9297]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([300, 3]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype1-mul-e-v-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.0819], [1.3501], [1.3166], [1.7673], [1.8311], [1.1960], [1...065], [1.5400], [1.6630], [1.2732], [1.9890]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1304], [1.3613], [1.8186], [1.2268], [1.9539], [1.9699], [1...635], [1.2263], [1.6695], [1.5738], [1.1066]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', 
rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype1-mul-e-v-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'mul' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.2050], [1.6784], [1.4032], [1.6232], [1.8185], [1.6023], [1...702], [1.6586], [1.8764], [1.5565], [1.1835]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8703], [1.1009], [1.7755], [1.6587], [1.6815], [1.0922], [1...968], [1.3828], [1.9065], [1.0431], [1.3888]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([300, 1]) SDDMM(message func: v_mul_e) _____________________ test_sddmm[idtype1-div-u-v-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.3267], [1.5514], [1.6718]]], [[[1.8335], [1.4144], ...0]]], [[[1.6789], [1.6344], [1.5417]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.5170]], [[0.5800]], [[0.6923]]]], [[[[0.8538]], [[0.512... [[[[0.7897]], [[0.6037]], [[0.8538]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
_____________________ test_sddmm[idtype1-div-u-v-shp0-g1] ______________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: v_div_u)
_____________________ test_sddmm[idtype1-div-u-v-shp1-g0] ______________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: v_div_u)
_____________________ test_sddmm[idtype1-div-u-v-shp1-g1] ______________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: v_div_u)
_____________________ test_sddmm[idtype1-div-u-v-shp2-g0] ______________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: v_div_u)
_____________________ test_sddmm[idtype1-div-u-v-shp2-g1] ______________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: v_div_u)
_____________________ test_sddmm[idtype1-div-u-v-shp3-g0] ______________________
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: v_div_u)
_____________________ test_sddmm[idtype1-div-u-v-shp3-g1] ______________________
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: v_div_u)
_____________________ test_sddmm[idtype1-div-u-v-shp4-g0] ______________________
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: v_div_u)
_____________________ test_sddmm[idtype1-div-u-v-shp4-g1] ______________________
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: v_div_u)
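Note that the locals in the frames above show op = 'mul' even though every failing test id says div: the gsddmm wrapper whose source is visible in the traceback rewrites 'sub' as 'add' of a negated rhs and 'div' as 'mul' by the reciprocal before dispatching, so the div parametrization reaches the failing autocast guard on the 'mul' path. A standalone illustration of those rewrites, independent of DGL:

    import torch

    lhs = torch.tensor([2.0, 3.0, 4.0])
    rhs = torch.tensor([0.5, 2.0, 4.0])

    # The same algebra gsddmm applies to its operands before dispatching:
    # division becomes multiplication by the reciprocal, and subtraction
    # becomes addition of the negation.
    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))
    assert torch.allclose(lhs - rhs, lhs + (-rhs))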
_____________________ test_sddmm[idtype1-div-u-e-shp0-g0] ______________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_div_u)
_____________________ test_sddmm[idtype1-div-u-e-shp0-g1] ______________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_div_u)
_____________________ test_sddmm[idtype1-div-u-e-shp1-g0] ______________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: e_div_u)
_____________________ test_sddmm[idtype1-div-u-e-shp1-g1] ______________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: e_div_u)
_____________________ test_sddmm[idtype1-div-u-e-shp2-g0] ______________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: e_div_u)
_____________________ test_sddmm[idtype1-div-u-e-shp2-g1] ______________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: e_div_u)
_____________________ test_sddmm[idtype1-div-u-e-shp3-g0] ______________________
shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: e_div_u)
_____________________ test_sddmm[idtype1-div-u-e-shp3-g1] ______________________
shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: e_div_u)
_____________________ test_sddmm[idtype1-div-u-e-shp4-g0] ______________________
shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: e_div_u)
_____________________ test_sddmm[idtype1-div-u-e-shp4-g1] ______________________
shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u'
msg = 'div', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: e_div_u)
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_div_u) _____________________ test_sddmm[idtype1-div-v-u-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.4849], [1.9476], [1.2178]]], [[[1.9958], [1.5739], ...3]]], [[[1.2187], [1.9133], [1.6610]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.5071]], [[0.5178]], [[0.9643]]]], [[[[0.5922]], [[0.702... [[[[0.8349]], [[0.7025]], [[0.6696]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
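The gsddmm source quoted in the traceback above also explains why the locals show op = 'mul' even though every failing test parametrizes msg = 'div': the wrapper canonicalizes operators so the backend only needs add/mul kernels, rewriting 'sub' as addition of the negation and 'div' as multiplication by the reciprocal. A small self-contained check of that identity, with plain tensors standing in for the node/edge features used by the tests:

    # Illustration of the operator rewriting quoted from dgl.ops.sddmm.gsddmm;
    # plain tensors stand in for the u/v/e features built by test_sddmm.
    import torch

    lhs = torch.rand(100, 3) + 1  # drawn like the tests' feat_lhs
    rhs = torch.rand(100, 3) + 1  # strictly positive, so division is safe

    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))  # 'div' -> 'mul'
    assert torch.allclose(lhs - rhs, lhs + (-rhs))      # 'sub' -> 'add'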
_____________________ test_sddmm[idtype1-div-v-u-shp0-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v', msg = 'div', idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]); SDDMM(message func: u_div_v)
_____________________ test_sddmm[idtype1-div-v-u-shp1-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v', msg = 'div', idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]); SDDMM(message func: u_div_v)
_____________________ test_sddmm[idtype1-div-v-u-shp1-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v', msg = 'div', idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]); SDDMM(message func: u_div_v)
_____________________ test_sddmm[idtype1-div-v-u-shp2-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'div', idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]); SDDMM(message func: u_div_v)
_____________________ test_sddmm[idtype1-div-v-u-shp2-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'div', idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]); SDDMM(message func: u_div_v)
_____________________ test_sddmm[idtype1-div-v-u-shp3-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'div', idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3]); SDDMM(message func: u_div_v)
_____________________ test_sddmm[idtype1-div-v-u-shp3-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'div', idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3]); SDDMM(message func: u_div_v)
_____________________ test_sddmm[idtype1-div-v-u-shp4-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'div', idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1]); SDDMM(message func: u_div_v)
_____________________ test_sddmm[idtype1-div-v-u-shp4-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'div', idtype = torch.int64
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1]); SDDMM(message func: u_div_v)
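Because every failing id above contains both 'div' and 'idtype1', the subset can be reproduced locally without running the full suite. A sketch, assuming a DGL source checkout with the test dependencies installed:

    # Re-run only the parametrizations failing in this log; the -k expression
    # filters on substrings of the test ids shown above.
    import pytest

    pytest.main(['tests/compute/test_sparse.py::test_sddmm',
                 '-k', 'div and idtype1', '-q'])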
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: u_div_v) _____________________ test_sddmm[idtype1-div-v-e-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.0842], [1.9470], [1.1406]]], [[[1.4130], [1.8032], ...4]]], [[[1.7742], [1.8163], [1.9835]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.6188]], [[0.9997]], [[0.5720]]]], [[[[0.8190]], [[0.565... [[[[0.8070]], [[0.5386]], [[0.8603]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype1-div-v-e-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.6496], [1.3593], [1.6356]]], [[[1.8758], [1.8274], ...1]]], [[[1.4382], [1.6970], [1.7394]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.5325]], [[0.7225]], [[0.5020]]]], [[[[0.5997]], [[0.939... [[[[0.5678]], [[0.5997]], [[0.8961]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype1-div-v-e-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.7031, 1.4639, 1.7386, ..., 1.9274, 1.9027, 1.9381]], [[1.8258, 1.7656, 1.4615, ..., 1.2879,... [[1.5038, 1.6215, 1.2240, ..., 1.1297, 1.0297, 1.4822]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[0.5852, 0.7411, 0.5133, ..., 0.9405, 0.6973, 0.6454], [0.5190, 0.5984, 0.5570, ..., 0.7665, 0... [0.7409, 0.5313, 0.7410, ..., 0.7530, 0.5074, 0.9129]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype1-div-v-e-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.6833, 1.0193, 1.5016, ..., 1.7930, 1.4734, 1.5235]], [[1.0592, 1.6965, 1.9324, ..., 1.6573,... [[1.4558, 1.9102, 1.7252, ..., 1.9135, 1.7847, 1.8627]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[0.6092, 0.5621, 0.8365, ..., 0.8749, 0.8817, 0.5297], [0.5059, 0.7434, 0.8918, ..., 0.7337, 0... [0.8380, 0.5863, 0.5043, ..., 0.5902, 0.5191, 0.6839]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype1-div-v-e-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'div' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.1267, 1.6303, 1.1637], [1.1344, 1.2095, 1.5798], [1.4941, 1.2798, 1.0491]]], ...1], [1.4829, 1.7115, 1.0869], [1.1247, 1.1216, 1.2771]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[0.9263, 0.5910, 0.5040]], [[0.5893, 0.5539, 0.5258]], [[0.5103, 0.6093, 0.9632]], ...[[0.6545, 0.6107, 0.9108]], [[0.8786, 0.5377, 0.7057]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_div_v) _____________________ test_sddmm[idtype1-div-v-e-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'div' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.4500, 1.8919, 1.2470], [1.9012, 1.6501, 1.7750], [1.8768, 1.6705, 1.7534]]], ...6], [1.0325, 1.8154, 1.1440], [1.3809, 1.1682, 1.5104]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[0.7320, 0.6293, 0.5393]], [[0.6187, 0.5619, 0.6487]], [[0.6389, 0.5306, 0.8432]], ...[[0.5642, 0.6182, 0.8368]], [[0.7509, 0.5392, 0.5442]]]], dtype=torch.float64, grad_fn=) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
Each of the following parametrizations fails identically, with TypeError: empty_context() got an unexpected keyword argument 'enabled' raised at python/dgl/backend/pytorch/sparse.py:731 through the same gsddmm call path; per-case captured stdout:

_____________________ test_sddmm[idtype1-div-v-e-shp2-g1] ______________________
lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]); SDDMM(message func: e_div_v)
_____________________ test_sddmm[idtype1-div-v-e-shp3-g0] ______________________
lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]); SDDMM(message func: e_div_v)
_____________________ test_sddmm[idtype1-div-v-e-shp3-g1] ______________________
lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3]); SDDMM(message func: e_div_v)
_____________________ test_sddmm[idtype1-div-v-e-shp4-g0] ______________________
lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]); SDDMM(message func: e_div_v)
_____________________ test_sddmm[idtype1-div-v-e-shp4-g1] ______________________
lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([40, 1]); SDDMM(message func: e_div_v)
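A side note on the locals in the full traceback above: every failing 'div' case reports op = 'mul' because, as the gsddmm body shows, division is rewritten as multiplication by the reciprocal of the right operand (and subtraction as addition of the negation) before dispatch. A quick NumPy check of those identities, independent of DGL:

    import numpy as np

    lhs = np.array([1.5, 2.0, 4.0])
    rhs = np.array([0.5, 0.8, 2.0])

    # 'div' rewritten as 'mul' with the reciprocal right operand:
    assert np.allclose(lhs / rhs, lhs * (1.0 / rhs))
    # 'sub' rewritten as 'add' with the negated right operand:
    assert np.allclose(lhs - rhs, lhs + (-rhs))

The remaining u_div_e and v_div_e parametrizations fail the same way: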
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype1-div-e-u-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.0497], [1.3980], [1.1248]]], [[[1.8696], [1.5180], ...3]]], [[[1.8786], [1.1540], [1.4359]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.6858]], [[0.6631]], [[0.6284]]]], [[[[0.5485]], [[0.512... [[[[0.7975]], [[0.8801]], [[0.7680]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype1-div-e-u-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.4389, 1.6032, 1.6155, ..., 1.3564, 1.2222, 1.3152]], [[1.3897, 1.5175, 1.4667, ..., 1.9636,... [[1.9316, 1.2442, 1.5770, ..., 1.7335, 1.7845, 1.4579]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[0.6778, 0.9497, 0.6679, ..., 0.6343, 0.6524, 0.5106], [0.7241, 0.7295, 0.6675, ..., 0.5063, 0... [0.8261, 0.9609, 0.5788, ..., 0.9775, 0.6056, 0.5544]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype1-div-e-u-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.4760, 1.9773, 1.2339, ..., 1.5700, 1.2312, 1.4203]], [[1.0392, 1.0729, 1.2471, ..., 1.1620,... [[1.8361, 1.5921, 1.2374, ..., 1.6536, 1.1545, 1.4498]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[0.7756, 0.6699, 0.6004, ..., 0.8856, 0.7880, 0.5103], [0.8095, 0.7432, 0.7333, ..., 0.6132, 0... [0.9311, 0.5521, 0.7497, ..., 0.5195, 0.5313, 0.5483]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype1-div-e-u-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e', msg = 'div' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.8924, 1.9017, 1.3467], [1.8844, 1.8657, 1.4196], [1.8300, 1.8229, 1.9475]]], ...5], [1.2248, 1.2928, 1.1088], [1.4213, 1.4955, 1.0926]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[0.5265, 0.6423, 0.7433]], [[0.5001, 0.6110, 0.7751]], [[0.6125, 0.9162, 0.5747]], ...[[0.8856, 0.6100, 0.5867]], [[0.8623, 0.5062, 0.5554]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype1-div-e-u-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e', msg = 'div' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[1.0559, 1.7663, 1.5525], [1.3992, 1.8087, 1.6537], [1.3712, 1.0787, 1.0675]]], ...7], [1.5915, 1.6187, 1.4747], [1.7886, 1.2517, 1.5389]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[0.8218, 0.5791, 0.7331]], [[0.8182, 0.7430, 0.5559]], [[0.8706, 0.5964, 0.6225]], ...[[0.5669, 0.7311, 0.5241]], [[0.5663, 0.7395, 0.9309]]]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype1-div-e-u-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'div' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.1938, 1.7497, 1.8304], [1.0364, 1.1151, 1.6600], [1.9304, 1.2881, 1.3306], [1.7613,... 1.7814], [1.4301, 1.7045, 1.3185], [1.1038, 1.5673, 1.8023]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.6159, 0.6329, 0.8159], [0.5124, 0.8333, 0.5030], [0.8938, 0.5063, 0.5986], [0.9832,...629], [0.5790, 0.7223, 0.8672], [0.5482, 0.5269, 0.7914]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype1-div-e-u-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'div' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.4834, 1.2620, 1.4882], [1.9454, 1.6106, 1.7618], [1.2441, 1.6945, 1.3341], [1.5991,... 1.5447], [1.4072, 1.8494, 1.5500], [1.5442, 1.0979, 1.5081]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.6620, 0.7021, 0.7077], [0.6188, 0.7651, 0.6241], [0.9593, 0.5740, 0.5667], [0.5869,...675], [0.8086, 0.5203, 0.5293], [0.7263, 0.7347, 0.6045]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([300, 3]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype1-div-e-u-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'div' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.6643], [1.8286], [1.7351], [1.7639], [1.7416], [1.7143], [1...201], [1.4222], [1.6598], [1.4736], [1.9381]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.5728], [0.5221], [0.6619], [0.5392], [0.5273], [0.9652], [0..., [0.5448], [0.6262], [0.5994], [0.5492]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype1-div-e-u-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'div' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[1.7038], [1.7336], [1.6811], [1.4660], [1.5304], [1.1648], [1...964], [1.7800], [1.4928], [1.8949], [1.6912]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[0.5038], [0.7487], [0.7345], [0.6095], [0.6039], [0.6319], [0..., [0.6771], [0.5248], [0.5708], [0.7657]], dtype=torch.float64, grad_fn=) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([300, 1]) SDDMM(message func: u_div_e) _____________________ test_sddmm[idtype1-div-e-v-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.4364], [1.6382], [1.9810]]], [[[1.9048], [1.0283], ...4]]], [[[1.0579], [1.3374], [1.3963]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.6942]], [[0.5389]], [[0.7697]]]], [[[[0.8847]], [[0.575... [[[[0.9564]], [[0.8027]], [[0.5766]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: v_div_e) _____________________ test_sddmm[idtype1-div-e-v-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[[1.0387], [1.5022], [1.1555]]], [[[1.6103], [1.8578], ...8]]], [[[1.0593], [1.2320], [1.1111]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[0.5928]], [[0.7341]], [[0.5511]]]], [[[[0.6236]], [[0.645... [[[[0.7769]], [[0.5712]], [[0.5798]]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]) SDDMM(message func: v_div_e) _____________________ test_sddmm[idtype1-div-e-v-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'div', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul' lhs_data = tensor([[[[[1.2380, 1.7252, 1.2806, ..., 1.3164, 1.1429, 1.7729]], [[1.1620, 1.5094, 1.5257, ..., 1.4025,... [[1.8423, 1.1178, 1.9932, ..., 1.5302, 1.3837, 1.5648]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[0.9336, 0.9392, 0.9603, ..., 0.5776, 0.7538, 0.8107], [0.5363, 0.8434, 0.5953, ..., 0.7883, 0... [0.8423, 0.5362, 0.6850, ..., 0.6196, 0.7930, 0.5256]]]]], dtype=torch.float64, grad_fn=) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
All ten v_div_e parametrizations fail this way. Throughout, idtype1 is torch.int64; g0 is the homogeneous graph (num_nodes=30, num_edges=100) and g1 the heterogeneous graph (num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}); lhs_target = 'v', rhs_target = 'e'. In each locals dump, op already appears as 'mul' and rhs_data holds reciprocals, because gsddmm rewrites 'div' before the failing call, as the sketch after this list illustrates.

    test_sddmm[idtype1-div-e-v-shp0-g0]  lhs torch.Size([30, 1, 2, 1, 3, 1])  rhs torch.Size([100, 4, 1, 3, 1, 1])
    test_sddmm[idtype1-div-e-v-shp0-g1]  lhs torch.Size([40, 1, 2, 1, 3, 1])  rhs torch.Size([300, 4, 1, 3, 1, 1])
    test_sddmm[idtype1-div-e-v-shp1-g0]  lhs torch.Size([30, 5, 3, 1, 7])     rhs torch.Size([100, 1, 3, 7, 7])
    test_sddmm[idtype1-div-e-v-shp1-g1]  lhs torch.Size([40, 5, 3, 1, 7])     rhs torch.Size([300, 1, 3, 7, 7])
    test_sddmm[idtype1-div-e-v-shp2-g0]  lhs torch.Size([30, 1, 3, 3])        rhs torch.Size([100, 4, 1, 3])
    test_sddmm[idtype1-div-e-v-shp2-g1]  lhs torch.Size([40, 1, 3, 3])        rhs torch.Size([300, 4, 1, 3])
    test_sddmm[idtype1-div-e-v-shp3-g0]  lhs torch.Size([30, 3])              rhs torch.Size([100, 3])
    test_sddmm[idtype1-div-e-v-shp3-g1]  lhs torch.Size([40, 3])              rhs torch.Size([300, 3])
    test_sddmm[idtype1-div-e-v-shp4-g0]  lhs torch.Size([30, 1])              rhs torch.Size([100, 1])
    test_sddmm[idtype1-div-e-v-shp4-g1]  lhs torch.Size([40, 1])              rhs torch.Size([300, 1])
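The canonicalization at the top of gsddmm explains that oddity in the locals: every case in the list parametrizes msg = 'div', yet pytest reports op = 'mul', and rhs_data holds values below 1 even though the test draws features from [1, 2). The rewrite is purely algebraic, as a short check with plain tensors shows (stand-ins for the node/edge features, not DGL's kernel path):

    import torch

    lhs = torch.rand(4, 3) + 1  # features drawn from [1, 2), as in the test
    rhs = torch.rand(4, 3) + 1

    # 'div' is dispatched as 'mul' against the reciprocal ...
    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))
    # ... and 'sub' as 'add' against the negation.
    assert torch.allclose(lhs - rhs, lhs + (-rhs))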
The ten v_dot_u parametrizations (lhs_target = 'v', rhs_target = 'u', msg = 'dot') fail identically; the rhs lengths now track the source-node count (30 in both graphs) instead of the edge count.

    test_sddmm[idtype1-dot-u-v-shp0-g0]  lhs torch.Size([30, 1, 2, 1, 3, 1])  rhs torch.Size([30, 4, 1, 3, 1, 1])
    test_sddmm[idtype1-dot-u-v-shp0-g1]  lhs torch.Size([40, 1, 2, 1, 3, 1])  rhs torch.Size([30, 4, 1, 3, 1, 1])
    test_sddmm[idtype1-dot-u-v-shp1-g0]  lhs torch.Size([30, 5, 3, 1, 7])     rhs torch.Size([30, 1, 3, 7, 7])
    test_sddmm[idtype1-dot-u-v-shp1-g1]  lhs torch.Size([40, 5, 3, 1, 7])     rhs torch.Size([30, 1, 3, 7, 7])
    test_sddmm[idtype1-dot-u-v-shp2-g0]  lhs torch.Size([30, 1, 3, 3])        rhs torch.Size([30, 4, 1, 3])
    test_sddmm[idtype1-dot-u-v-shp2-g1]  lhs torch.Size([40, 1, 3, 3])        rhs torch.Size([30, 4, 1, 3])
    test_sddmm[idtype1-dot-u-v-shp3-g0]  lhs torch.Size([30, 3])              rhs torch.Size([30, 3])
    test_sddmm[idtype1-dot-u-v-shp3-g1]  lhs torch.Size([40, 3])              rhs torch.Size([30, 3])
    test_sddmm[idtype1-dot-u-v-shp4-g0]  lhs torch.Size([30, 1])              rhs torch.Size([30, 1])
    test_sddmm[idtype1-dot-u-v-shp4-g1]  lhs torch.Size([40, 1])              rhs torch.Size([30, 1])
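Aside from the leading node/edge dimension, each paired feature shape in these parametrizations is chosen to combine by ordinary broadcasting; for the shp0 pair, (1, 2, 1, 3, 1) against (4, 1, 3, 1, 1) broadcasts to (4, 2, 3, 3, 1). A quick check of that shape arithmetic with plain tensors (the per-edge broadcasting semantics of DGL's kernels are assumed here, not shown in this log):

    import torch

    lhs = torch.rand(1, 2, 1, 3, 1)  # shp0 lhs feature shape
    rhs = torch.rand(4, 1, 3, 1, 1)  # shp0 rhs feature shape
    assert (lhs * rhs).shape == (4, 2, 3, 3, 1)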
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: v_dot_u) _____________________ test_sddmm[idtype1-dot-u-e-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'dot', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[[1.8929], [1.8172], [1.1081]]], [[[1.7914], [1.7172], ...5]]], [[[1.8290], [1.9116], [1.1230]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.9307]], [[1.4847]], [[1.1425]]]], [[[[1.7151]], [[1.944... [[[[1.7452]], [[1.1922]], [[1.4873]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_dot_u) _____________________ test_sddmm[idtype1-dot-u-e-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u' msg = 'dot', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[[1.0015], [1.9556], [1.8495]]], [[[1.7520], [1.8096], ...7]]], [[[1.6085], [1.5341], [1.3644]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.8238]], [[1.3152]], [[1.9954]]]], [[[[1.9656]], [[1.693... [[[[1.2299]], [[1.7161]], [[1.4107]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: e_dot_u) _____________________ test_sddmm[idtype1-dot-u-e-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...pe=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u' msg = 'dot', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[1.6087, 1.5758, 1.3744, ..., 1.3427, 1.0609, 1.8380]], [[1.7976, 1.8480, 1.7812, ..., 1.4050,... [[1.6688, 1.3049, 1.4868, ..., 1.7376, 1.0302, 1.2928]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.9245, 1.3635, 1.0887, ..., 1.8764, 1.8067, 1.0915], [1.4775, 1.8152, 1.5350, ..., 1.3062, 1... [1.1121, 1.0329, 1.5102, ..., 1.8420, 1.9678, 1.0070]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_dot_u) _____________________ test_sddmm[idtype1-dot-u-e-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u' msg = 'dot', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[1.2297, 1.5828, 1.4560, ..., 1.9342, 1.5930, 1.9337]], [[1.2020, 1.1646, 1.0314, ..., 1.3889,... [[1.7323, 1.2561, 1.0115, ..., 1.0068, 1.8932, 1.9372]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.1379, 1.5543, 1.0774, ..., 1.0734, 1.5426, 1.0490], [1.6841, 1.2757, 1.2800, ..., 1.3195, 1... 
[1.3295, 1.9932, 1.9135, ..., 1.7510, 1.9703, 1.1179]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: e_dot_u) _____________________ test_sddmm[idtype1-dot-u-e-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u', msg = 'dot' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[1.2564, 1.0165, 1.7853], [1.6427, 1.9880, 1.9496], [1.8283, 1.3121, 1.0973]]], ...9], [1.7624, 1.2640, 1.3450], [1.1071, 1.7139, 1.1210]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.0367, 1.7632, 1.7744]], [[1.6872, 1.5771, 1.4852]], [[1.1877, 1.9525, 1.1519]], ... 
[[1.3692, 1.5533, 1.1285]], [[1.2271, 1.2466, 1.8894]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_dot_u) _____________________ test_sddmm[idtype1-dot-u-e-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u', msg = 'dot' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[1.8641, 1.0490, 1.0675], [1.7391, 1.0514, 1.2092], [1.1094, 1.3734, 1.8647]]], ...7], [1.4900, 1.9876, 1.6776], [1.3845, 1.1853, 1.7816]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.9559, 1.0588, 1.5548]], [[1.1503, 1.6057, 1.0936]], [[1.7977, 1.5481, 1.7440]], ... 
[[1.7704, 1.4147, 1.3119]], [[1.4699, 1.7053, 1.3757]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_dot_u) _____________________ test_sddmm[idtype1-dot-u-e-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'dot' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[1.6878, 1.8380, 1.3412], [1.4477, 1.1997, 1.7449], [1.1725, 1.1558, 1.7330], [1.4799,... 1.7423], [1.3610, 1.5671, 1.9384], [1.4528, 1.0491, 1.1346]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5589, 1.8930, 1.6787], [1.5708, 1.7006, 1.5178], [1.6503, 1.1264, 1.9338], [1.1213,... 
1.9438], [1.1982, 1.1001, 1.4376], [1.8619, 1.9389, 1.7284]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_dot_u) _____________________ test_sddmm[idtype1-dot-u-e-shp3-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'dot' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[1.1501, 1.2027, 1.8192], [1.0310, 1.5309, 1.4071], [1.9518, 1.2560, 1.4114], [1.5758,... 1.5170], [1.6794, 1.9234, 1.3274], [1.0308, 1.9787, 1.4570]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8581, 1.5765, 1.5019], [1.5453, 1.5669, 1.6639], [1.4623, 1.7714, 1.0711], [1.0080,... 
1.5355], [1.2865, 1.5617, 1.5364], [1.4438, 1.2778, 1.5828]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_dot_u) _____________________ test_sddmm[idtype1-dot-u-e-shp4-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'dot' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[1.7934], [1.7021], [1.7350], [1.1015], [1.3428], [1.3501], [1...227], [1.3342], [1.1890], [1.7169], [1.3256]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1553], [1.5367], [1.3074], [1.4910], [1.6408], [1.2142], [1...186], [1.8776], [1.2527], [1.0650], [1.3499]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', 
rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_dot_u) _____________________ test_sddmm[idtype1-dot-u-e-shp4-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'dot' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[1.1368], [1.2121], [1.8893], [1.2679], [1.0351], [1.4249], [1...781], [1.9054], [1.1142], [1.9216], [1.1599]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9453], [1.0233], [1.9063], [1.6982], [1.3402], [1.7719], [1...106], [1.4667], [1.9137], [1.0967], [1.4981]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_dot_u) _____________________ test_sddmm[idtype1-dot-v-u-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'dot', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[[1.0467], [1.4673], [1.8423]]], [[[1.7383], [1.4226], ...9]]], [[[1.7902], [1.1769], [1.9763]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.3873]], [[1.2994]], [[1.3116]]]], [[[[1.7125]], [[1.245... [[[[1.3221]], [[1.4569]], [[1.0036]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: u_dot_v) _____________________ test_sddmm[idtype1-dot-v-u-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v' msg = 'dot', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[[1.3675], [1.7645], [1.6237]]], [[[1.0176], [1.6864], ...1]]], [[[1.0047], [1.1932], [1.5328]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.5098]], [[1.5806]], [[1.3653]]]], [[[[1.4074]], [[1.726... [[[[1.8302]], [[1.5568]], [[1.6229]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]) SDDMM(message func: u_dot_v) _____________________ test_sddmm[idtype1-dot-v-u-shp1-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v' msg = 'dot', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[1.8915, 1.5989, 1.1340, ..., 1.2125, 1.8818, 1.8904]], [[1.3225, 1.3147, 1.1275, ..., 1.5766,... [[1.2852, 1.6357, 1.5347, ..., 1.2923, 1.0561, 1.4420]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.4277, 1.8787, 1.1416, ..., 1.8206, 1.9680, 1.0438], [1.5271, 1.1148, 1.6219, ..., 1.0966, 1... [1.0304, 1.5528, 1.6306, ..., 1.5635, 1.8186, 1.4631]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: u_dot_v) _____________________ test_sddmm[idtype1-dot-v-u-shp1-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v' msg = 'dot', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[1.9506, 1.5653, 1.0005, ..., 1.3471, 1.1497, 1.2962]], [[1.4865, 1.8055, 1.7359, ..., 1.5358,... [[1.9473, 1.9040, 1.1555, ..., 1.6542, 1.7798, 1.3116]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.4132, 1.8718, 1.3931, ..., 1.4979, 1.2243, 1.7997], [1.8917, 1.1275, 1.7960, ..., 1.4312, 1... 
[1.5247, 1.3845, 1.5499, ..., 1.5387, 1.7007, 1.1583]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]) SDDMM(message func: u_dot_v) _____________________ test_sddmm[idtype1-dot-v-u-shp2-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'dot' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[1.0889, 1.9650, 1.9019], [1.3482, 1.7156, 1.9754], [1.6069, 1.2622, 1.0293]]], ...5], [1.0239, 1.9309, 1.2482], [1.9643, 1.9551, 1.2763]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.6221, 1.8249, 1.0321]], [[1.2691, 1.6156, 1.6902]], [[1.4451, 1.1199, 1.2279]], ... 
[[1.8761, 1.2515, 1.2776]], [[1.2714, 1.8511, 1.7830]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: u_dot_v) _____________________ test_sddmm[idtype1-dot-v-u-shp2-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v', msg = 'dot' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[1.9406, 1.9268, 1.7475], [1.1313, 1.9823, 1.2510], [1.2104, 1.6287, 1.0968]]], ...8], [1.5879, 1.6819, 1.1967], [1.6055, 1.4173, 1.6767]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.8736, 1.1848, 1.9562]], [[1.9488, 1.1951, 1.0181]], [[1.6250, 1.4908, 1.5382]], ... 
[[1.5130, 1.5392, 1.5847]], [[1.7939, 1.6738, 1.0989]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]) SDDMM(message func: u_dot_v) _____________________ test_sddmm[idtype1-dot-v-u-shp3-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'dot' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[1.3113, 1.3529, 1.2787], [1.6873, 1.3606, 1.4951], [1.3553, 1.3898, 1.9577], [1.3161,... 1.8151], [1.5692, 1.1288, 1.3587], [1.2505, 1.7218, 1.1145]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1791, 1.8427, 1.3442], [1.7211, 1.7867, 1.0867], [1.9562, 1.2629, 1.3715], [1.1341,... 
_____________________ test_sddmm[idtype1-dot-v-u-shp3-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3]); SDDMM(message func: u_dot_v)

_____________________ test_sddmm[idtype1-dot-v-u-shp4-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1]); SDDMM(message func: u_dot_v)

_____________________ test_sddmm[idtype1-dot-v-u-shp4-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1]); SDDMM(message func: u_dot_v)

_____________________ test_sddmm[idtype1-dot-v-e-shp0-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]); SDDMM(message func: e_dot_v)

_____________________ test_sddmm[idtype1-dot-v-e-shp0-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1]); SDDMM(message func: e_dot_v)

_____________________ test_sddmm[idtype1-dot-v-e-shp1-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]); SDDMM(message func: e_dot_v)
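For reference, the gsddmm frame shown in each traceback canonicalizes operators before dispatch: 'sub' becomes 'add' with a negated right operand, and 'div' becomes 'mul' with a reciprocal one, so the backend kernels only need the two commutative ops. A standalone illustration with plain tensors (not DGL code):

    # Illustration of the operator rewriting visible in gsddmm above.
    import torch

    def canonicalize(op, rhs_data):
        if op == 'sub':
            op, rhs_data = 'add', -rhs_data
        if op == 'div':
            op, rhs_data = 'mul', 1. / rhs_data
        return op, rhs_data

    lhs = torch.rand(4, 3) + 1
    rhs = torch.rand(4, 3) + 1
    op, r = canonicalize('div', rhs)
    # 'div' has been reduced to 'mul' with the reciprocal right operand.
    assert op == 'mul' and torch.allclose(lhs * r, lhs / rhs)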
_____________________ test_sddmm[idtype1-dot-v-e-shp1-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]); SDDMM(message func: e_dot_v)

_____________________ test_sddmm[idtype1-dot-v-e-shp2-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]); SDDMM(message func: e_dot_v)

_____________________ test_sddmm[idtype1-dot-v-e-shp2-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]); SDDMM(message func: e_dot_v)

_____________________ test_sddmm[idtype1-dot-v-e-shp3-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]); SDDMM(message func: e_dot_v)

_____________________ test_sddmm[idtype1-dot-v-e-shp3-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3]); SDDMM(message func: e_dot_v)

_____________________ test_sddmm[idtype1-dot-v-e-shp4-g0] ______________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]); SDDMM(message func: e_dot_v)

_____________________ test_sddmm[idtype1-dot-v-e-shp4-g1] ______________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]), shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'dot', idtype = torch.int64
lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([40, 1]); SDDMM(message func: e_dot_v)
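The _cast_if_autocast_enabled call in the failing frame follows a common AMP pattern: cast floating-point inputs once on entry to a custom op, then run the kernel under autocast(enabled=False) so nothing is re-cast inside. The sketch below is an assumed reconstruction under that pattern, not DGL's actual helper body, and it relies on torch.get_autocast_gpu_dtype (PyTorch 1.10+):

    # Assumed sketch of the casting helper named in the traceback: outside an
    # autocast region the arguments pass through untouched; inside, floating-
    # point tensors are cast to the autocast dtype while non-tensor arguments
    # (the graph index, op string, target strings) are left alone.
    import torch

    def _cast_if_autocast_enabled(*args):
        if not torch.is_autocast_enabled():
            return args
        dtype = torch.get_autocast_gpu_dtype()
        return tuple(
            a.to(dtype) if torch.is_tensor(a) and a.is_floating_point() else a
            for a in args)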
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: e_dot_v) _____________________ test_sddmm[idtype1-dot-e-u-shp0-g0] ______________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e' msg = 'dot', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[[1.1945], [1.9400], [1.9952]]], [[[1.9620], [1.9267], ...4]]], [[[1.2246], [1.2643], [1.7787]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2624]], [[1.8297]], [[1.5291]]]], [[[[1.8235]], [[1.853... [[[[1.4873]], [[1.7090]], [[1.6346]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: u_dot_e) _____________________ test_sddmm[idtype1-dot-e-u-shp0-g1] ______________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e' msg = 'dot', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'dot' lhs_data = tensor([[[[[[1.5628], [1.1029], [1.0513]]], [[[1.6238], [1.3869], ...8]]], [[[1.2889], [1.8150], [1.5238]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.5960]], [[1.2302]], [[1.8312]]]], [[[[1.6356]], [[1.247... [[[[1.9315]], [[1.7624]], [[1.5983]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300},
      metagraph=[('_U', '_V', '_E')])
torch.int64
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1])
SDDMM(message func: u_dot_e)
_____________________ test_sddmm[idtype1-dot-e-u-shp1-g0] ______________________

g = Graph(num_nodes=30, num_edges=100,
          ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64),
                         'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)})
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e'
msg = 'dot', idtype = torch.int64

    @pytest.mark.parametrize('g', graphs)
    @pytest.mark.parametrize('shp', sddmm_shapes)
    @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e'])
    @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e'])
    @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs'])
    @parametrize_idtype
    def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype):
        if lhs_target == rhs_target:
            return
        g = g.astype(idtype).to(F.ctx())
        if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0:
            pytest.skip()  # mxnet does not support zero-shape tensors
        print(g)
        print(g.idtype)
        len_lhs = select(
            lhs_target,
            g.number_of_src_nodes(),
            g.number_of_edges(),
            g.number_of_dst_nodes())
        lhs_shp = (len_lhs,) + shp[0]
        len_rhs = select(
            rhs_target,
            g.number_of_src_nodes(),
            g.number_of_edges(),
            g.number_of_dst_nodes())
        rhs_shp = (len_rhs,) + shp[1]
        feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1)
        feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1)
        print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs)))
        lhs_frame = select(
            lhs_target, g.srcdata, g.edata, g.dstdata)
        rhs_frame = select(
            rhs_target, g.srcdata, g.edata, g.dstdata)
        lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs))
        rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs))
        msg_func = lhs_target + '_' + msg + '_' + rhs_target
        print('SDDMM(message func: {})'.format(msg_func))
        lhs = F.attach_grad(F.clone(feat_lhs))
        rhs = F.attach_grad(F.clone(feat_rhs))
        with F.record_grad():
>           e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target)

tests/compute/test_sparse.py:207:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = <...>
op = 'dot'
lhs_data = tensor([[[[[1.0774, 1.5703, 1.2509, ..., 1.3731, 1.3843, 1.2944]],
                     [[1.8360, 1.5992, 1.6855, ..., 1.7838,...
                     [[1.2743, 1.0855, 1.4721, ..., 1.6444, 1.1865, 1.9987]]]]],
                  dtype=torch.float64, requires_grad=True)
rhs_data = tensor([[[[[1.4922, 1.4791, 1.6518, ..., 1.7840, 1.7998, 1.5239],
                     [1.5792, 1.1244, 1.9514, ..., 1.9750, 1...
                     [1.8071, 1.2642, 1.6370, ..., 1.8452, 1.5992, 1.5798]]]]],
                  dtype=torch.float64, requires_grad=True)
lhs_target = 'u', rhs_target = 'e'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
Graph(num_nodes=30, num_edges=100,
      ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}
      edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)})
torch.int64
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7])
SDDMM(message func: u_dot_e)
The remaining u_dot_e parametrizations fail at the same call, python/dgl/backend/pytorch/sparse.py:731,
with the same TypeError: empty_context() got an unexpected keyword argument 'enabled'.
Only the captured feature shapes differ:

test_sddmm[idtype1-dot-e-u-shp1-g1]   lhs torch.Size([30, 5, 3, 1, 7]),    rhs torch.Size([300, 1, 3, 7, 7])
test_sddmm[idtype1-dot-e-u-shp2-g0]   lhs torch.Size([30, 1, 3, 3]),       rhs torch.Size([100, 4, 1, 3])
test_sddmm[idtype1-dot-e-u-shp2-g1]   lhs torch.Size([30, 1, 3, 3]),       rhs torch.Size([300, 4, 1, 3])
test_sddmm[idtype1-dot-e-u-shp3-g0]   lhs torch.Size([30, 3]),             rhs torch.Size([100, 3])
test_sddmm[idtype1-dot-e-u-shp3-g1]   lhs torch.Size([30, 3]),             rhs torch.Size([300, 3])
test_sddmm[idtype1-dot-e-u-shp4-g0]   lhs torch.Size([30, 1]),             rhs torch.Size([100, 1])
test_sddmm[idtype1-dot-e-u-shp4-g1]   lhs torch.Size([30, 1]),             rhs torch.Size([300, 1])
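A note on the shapes listed above: past the leading node/edge dimension, each sddmm_shapes pair is
chosen so that lhs and rhs broadcast against each other (for the elementwise message ops; 'dot'
additionally reduces the trailing dimension). A quick check of the shp0 pair under standard
PyTorch broadcasting, as an illustrative sketch (the tensors and names below are not from the
test suite):

    import torch

    # Feature shapes from the shp0 cases, leading node/edge dim excluded.
    a = torch.randn(1, 2, 1, 3, 1)
    b = torch.randn(4, 1, 3, 1, 1)
    # Elementwise message ops rely on these broadcasting together.
    print(torch.broadcast_shapes(a.shape, b.shape))  # torch.Size([4, 2, 3, 3, 1])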
The v_dot_e parametrizations (lhs_target = 'v', rhs_target = 'e') fail the same way, again at
python/dgl/backend/pytorch/sparse.py:731:

test_sddmm[idtype1-dot-e-v-shp0-g0]   lhs torch.Size([30, 1, 2, 1, 3, 1]), rhs torch.Size([100, 4, 1, 3, 1, 1])
test_sddmm[idtype1-dot-e-v-shp0-g1]   lhs torch.Size([40, 1, 2, 1, 3, 1]), rhs torch.Size([300, 4, 1, 3, 1, 1])
test_sddmm[idtype1-dot-e-v-shp1-g0]   lhs torch.Size([30, 5, 3, 1, 7]),    rhs torch.Size([100, 1, 3, 7, 7])
test_sddmm[idtype1-dot-e-v-shp1-g1]   lhs torch.Size([40, 5, 3, 1, 7]),    rhs torch.Size([300, 1, 3, 7, 7])
test_sddmm[idtype1-dot-e-v-shp2-g0]   lhs torch.Size([30, 1, 3, 3]),       rhs torch.Size([100, 4, 1, 3])
test_sddmm[idtype1-dot-e-v-shp2-g1]   lhs torch.Size([40, 1, 3, 3]),       rhs torch.Size([300, 4, 1, 3])
test_sddmm[idtype1-dot-e-v-shp3-g0]   lhs torch.Size([30, 3]),             rhs torch.Size([100, 3])
test_sddmm[idtype1-dot-e-v-shp3-g1]   lhs torch.Size([40, 3]),             rhs torch.Size([300, 3])
test_sddmm[idtype1-dot-e-v-shp4-g0]   lhs torch.Size([30, 1]),             rhs torch.Size([100, 1])
test_sddmm[idtype1-dot-e-v-shp4-g1]   lhs torch.Size([40, 1]),             rhs torch.Size([300, 1])
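The gsddmm source repeated in each traceback canonicalizes 'sub' and 'div' into 'add' and 'mul'
before dispatching, so the backend only needs kernels for the latter pair. A minimal check of the
two identities this rewrite relies on (illustrative values only):

    import torch

    lhs = torch.tensor([3., 8.])
    rhs = torch.tensor([2., 4.])
    # 'sub' becomes 'add' with a negated rhs: lhs - rhs == lhs + (-rhs)
    assert torch.equal(lhs - rhs, lhs + (-rhs))
    # 'div' becomes 'mul' with the reciprocal: lhs / rhs == lhs * (1. / rhs)
    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))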
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: v_copy_lhs_u) ___________________ test_sddmm[idtype1-copy_lhs-u-v-shp0-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[[1.2479], [1.1303], [1.4148]]], [[[1.2507], [1.6994], ...8]]], [[[1.6696], [1.8992], [1.6719]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2308]], [[1.6940]], [[1.7230]]]], [[[[1.4613]], [[1.310... [[[[1.4941]], [[1.3921]], [[1.8305]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]) SDDMM(message func: v_copy_lhs_u) ___________________ test_sddmm[idtype1-copy_lhs-u-v-shp1-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[1.4776, 1.9842, 1.4483, ..., 1.7004, 1.7314, 1.9909]], [[1.7723, 1.5797, 1.6623, ..., 1.1283,... [[1.3820, 1.7274, 1.9855, ..., 1.9915, 1.9667, 1.1931]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.2706, 1.9567, 1.9919, ..., 1.0507, 1.1191, 1.8330], [1.0977, 1.9548, 1.8454, ..., 1.3631, 1... [1.8445, 1.5661, 1.3754, ..., 1.7456, 1.0591, 1.2400]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: v_copy_lhs_u) ___________________ test_sddmm[idtype1-copy_lhs-u-v-shp1-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[1.7395, 1.0039, 1.3932, ..., 1.1242, 1.0033, 1.2328]], [[1.9298, 1.3646, 1.2446, ..., 1.7217,... [[1.8530, 1.6091, 1.9977, ..., 1.1662, 1.4236, 1.0533]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.4937, 1.4621, 1.8748, ..., 1.0403, 1.5010, 1.8164], [1.2791, 1.0800, 1.5329, ..., 1.0259, 1... 
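The `_cast_if_autocast_enabled(...)` / `with autocast(enabled=False):` pair visible in the traceback above is a common AMP pattern: cast the inputs once to the ambient autocast dtype, then disable autocast inside the custom op so every intermediate keeps one consistent dtype. A rough tensors-only sketch of such a helper (hypothetical and simplified, assuming PyTorch >= 1.10; not DGL's actual implementation):

    import torch

    def cast_if_autocast_enabled(*tensors):
        # Outside an autocast region, pass inputs through untouched.
        if not torch.is_autocast_enabled():
            return tensors
        # Inside one, cast floating-point inputs to the autocast dtype once,
        # so the op can then run with autocast disabled at a fixed precision.
        dtype = torch.get_autocast_gpu_dtype()  # e.g. torch.float16
        return tuple(
            t.to(dtype) if torch.is_floating_point(t) else t
            for t in tensors
        )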
___________________ test_sddmm[idtype1-copy_lhs-u-v-shp2-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int64
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]); SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype1-copy_lhs-u-v-shp2-g1] ___________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int64
lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]); SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype1-copy_lhs-u-v-shp3-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int64
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3]); SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype1-copy_lhs-u-v-shp3-g1] ___________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int64
lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([30, 3]); SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype1-copy_lhs-u-v-shp4-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int64
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1]); SDDMM(message func: v_copy_lhs_u)
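Note how gsddmm, at the top of the traceback shown in full above, rewrites 'sub' as 'add' of a negated operand and 'div' as 'mul' by a reciprocal, so the backend only needs kernels for the commutative ops. A standalone check of the identities being relied on (plain PyTorch, independent of DGL):

    import torch

    lhs = torch.rand(4, 3) + 1
    rhs = torch.rand(4, 3) + 1  # + 1 keeps values away from zero, as in the test

    # 'sub' as 'add' of the negation; 'div' as 'mul' by the reciprocal.
    assert torch.allclose(lhs - rhs, lhs + (-rhs))
    assert torch.allclose(lhs / rhs, lhs * (1. / rhs))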
___________________ test_sddmm[idtype1-copy_lhs-u-v-shp4-g1] ___________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...), lhs_target = 'v', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int64
lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1]); SDDMM(message func: v_copy_lhs_u)

___________________ test_sddmm[idtype1-copy_lhs-u-e-shp0-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), lhs_target = 'e', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int64
lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]); SDDMM(message func: e_copy_lhs_u)

___________________ test_sddmm[idtype1-copy_lhs-u-e-shp0-g1] ___________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...), lhs_target = 'e', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int64
lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]); SDDMM(message func: e_copy_lhs_u)

___________________ test_sddmm[idtype1-copy_lhs-u-e-shp1-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), lhs_target = 'e', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int64
lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]); SDDMM(message func: e_copy_lhs_u)

___________________ test_sddmm[idtype1-copy_lhs-u-e-shp1-g1] ___________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...), lhs_target = 'e', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int64
lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]); SDDMM(message func: e_copy_lhs_u)

___________________ test_sddmm[idtype1-copy_lhs-u-e-shp2-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), lhs_target = 'e', rhs_target = 'u', msg = 'copy_lhs', idtype = torch.int64
lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]); SDDMM(message func: e_copy_lhs_u)
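For readers unfamiliar with the op under test: SDDMM takes one operand from source nodes ('u'), destination nodes ('v'), or edges ('e'), another from a different target, and combines them per edge into an edge feature. A self-contained reference for the 'dot' case in plain PyTorch (a sketch of the semantics only, not DGL's fused kernel):

    import torch

    # Toy graph: edge i runs from src[i] to dst[i].
    src = torch.tensor([0, 0, 1, 2])
    dst = torch.tensor([1, 2, 2, 0])
    u_feat = torch.rand(3, 4)  # per-source-node features
    v_feat = torch.rand(3, 4)  # per-destination-node features

    # u_dot_v: per-edge dot product of the endpoints' features; the fused
    # kernel computes this without materializing the dense product matrix.
    e_feat = (u_feat[src] * v_feat[dst]).sum(dim=-1, keepdim=True)
    print(e_feat.shape)  # torch.Size([4, 1])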
[[1.1007, 1.8222, 1.2812]], [[1.4515, 1.7894, 1.2175]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_copy_lhs_u) ___________________ test_sddmm[idtype1-copy_lhs-u-e-shp2-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[1.9454, 1.2228, 1.8152], [1.0698, 1.9326, 1.0942], [1.8140, 1.0939, 1.1496]]], ...5], [1.7336, 1.7107, 1.0348], [1.6234, 1.4649, 1.3446]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.9643, 1.1033, 1.9716]], [[1.5143, 1.4862, 1.8411]], [[1.8734, 1.1087, 1.5753]], ... 
[[1.0258, 1.2825, 1.9649]], [[1.2877, 1.6651, 1.9773]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: e_copy_lhs_u) ___________________ test_sddmm[idtype1-copy_lhs-u-e-shp3-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'copy_lhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.8907, 1.5974, 1.6311], [1.5089, 1.5788, 1.9862], [1.3157, 1.1306, 1.7316], [1.2479,... 1.1356], [1.1336, 1.6385, 1.8058], [1.1578, 1.1647, 1.4084]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3833, 1.2045, 1.9940], [1.8560, 1.2968, 1.7646], [1.9188, 1.5058, 1.8088], [1.5003,... 
1.1127], [1.7024, 1.8444, 1.9861], [1.1373, 1.0344, 1.3795]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_copy_lhs_u) ___________________ test_sddmm[idtype1-copy_lhs-u-e-shp3-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'copy_lhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.1981, 1.5950, 1.1628], [1.4137, 1.4414, 1.1546], [1.4394, 1.3592, 1.2561], [1.1672,... 1.3905], [1.5206, 1.5852, 1.9453], [1.4618, 1.6912, 1.1424]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3319, 1.6451, 1.6144], [1.0310, 1.2091, 1.0171], [1.0004, 1.5154, 1.1175], [1.7756,... 
___________________ test_sddmm[idtype1-copy_lhs-u-e-shp3-g1] ___________________
shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'copy_lhs'
idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: e_copy_lhs_u)
___________________ test_sddmm[idtype1-copy_lhs-u-e-shp4-g0] ___________________
shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'copy_lhs'
idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: e_copy_lhs_u)
___________________ test_sddmm[idtype1-copy_lhs-u-e-shp4-g1] ___________________
shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'copy_lhs'
idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: e_copy_lhs_u)
___________________ test_sddmm[idtype1-copy_lhs-v-u-shp0-g0] ___________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: u_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-u-shp0-g1] ___________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1])
SDDMM(message func: u_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-u-shp1-g0] ___________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: u_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-u-shp1-g1] ___________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])
SDDMM(message func: u_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-u-shp2-g0] ___________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: u_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-u-shp2-g1] ___________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3])
SDDMM(message func: u_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-u-shp3-g0] ___________________
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'copy_lhs'
idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: u_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-u-shp3-g1] ___________________
shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'v', msg = 'copy_lhs'
idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3])
SDDMM(message func: u_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-u-shp4-g0] ___________________
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'copy_lhs'
idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: u_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-u-shp4-g1] ___________________
shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'v', msg = 'copy_lhs'
idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1])
SDDMM(message func: u_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-e-shp0-g0] ___________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-e-shp0-g1] ___________________
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1])
SDDMM(message func: e_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-e-shp1-g0] ___________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: e_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-e-shp1-g1] ___________________
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])
SDDMM(message func: e_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-e-shp2-g0] ___________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: e_copy_lhs_v)
___________________ test_sddmm[idtype1-copy_lhs-v-e-shp2-g1] ___________________
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'v'
msg = 'copy_lhs', idtype = torch.int64
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
----------------------------- Captured stdout call -----------------------------
lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3])
SDDMM(message func: e_copy_lhs_v)
[[1.7419, 1.6202, 1.6748]], [[1.3423, 1.7560, 1.1994]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype1-copy_lhs-v-e-shp3-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'copy_lhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.0050, 1.5193, 1.1750], [1.5025, 1.6315, 1.0934], [1.2715, 1.8252, 1.2383], [1.6853,... 1.4193], [1.7477, 1.0726, 1.1497], [1.4576, 1.2901, 1.1567]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7333, 1.0006, 1.1735], [1.1952, 1.6252, 1.0293], [1.0436, 1.2068, 1.6157], [1.0427,... 
1.4087], [1.5819, 1.6604, 1.5157], [1.2078, 1.8540, 1.2035]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype1-copy_lhs-v-e-shp3-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'v', msg = 'copy_lhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.7094, 1.4927, 1.7456], [1.0073, 1.7838, 1.1406], [1.7166, 1.3654, 1.0492], [1.9667,... 1.8427], [1.5711, 1.7431, 1.1064], [1.4164, 1.5577, 1.5394]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.5906, 1.8498, 1.8729], [1.5606, 1.2851, 1.7560], [1.3134, 1.1908, 1.6316], [1.6748,... 
1.4669], [1.8417, 1.8713, 1.8511], [1.1412, 1.1232, 1.0036]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype1-copy_lhs-v-e-shp4-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'copy_lhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.4950], [1.5639], [1.0186], [1.3247], [1.0631], [1.0082], [1...885], [1.5755], [1.9805], [1.8463], [1.1116]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8780], [1.9256], [1.9051], [1.5508], [1.4576], [1.7243], [1...428], [1.1386], [1.1359], [1.5470], [1.5669]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, 
lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype1-copy_lhs-v-e-shp4-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'v', msg = 'copy_lhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.0860], [1.2440], [1.2445], [1.0440], [1.3505], [1.8235], [1...023], [1.9755], [1.1235], [1.9354], [1.6193]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.9962], [1.5200], [1.6211], [1.5661], [1.2273], [1.4411], [1...761], [1.1355], [1.2812], [1.6534], [1.4744]], dtype=torch.float64, requires_grad=True) lhs_target = 'e', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([40, 1]) SDDMM(message func: e_copy_lhs_v) ___________________ test_sddmm[idtype1-copy_lhs-e-u-shp0-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[[1.7601], [1.7939], [1.4167]]], [[[1.7733], [1.0478], ...9]]], [[[1.5769], [1.4548], [1.9332]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.7977]], [[1.6251]], [[1.2796]]]], [[[[1.2260]], [[1.767... [[[[1.9805]], [[1.9779]], [[1.4258]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype1-copy_lhs-e-u-shp0-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[[1.9763], [1.4426], [1.1327]]], [[[1.8984], [1.8083], ...9]]], [[[1.1778], [1.8545], [1.3696]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.2201]], [[1.8154]], [[1.1607]]]], [[[[1.0270]], [[1.089... [[[[1.4671]], [[1.6281]], [[1.6014]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype1-copy_lhs-e-u-shp1-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[1.5867, 1.1550, 1.9071, ..., 1.9460, 1.7336, 1.3658]], [[1.2041, 1.0029, 1.1502, ..., 1.6352,... [[1.3715, 1.7795, 1.6505, ..., 1.4812, 1.7781, 1.2727]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.3842, 1.1454, 1.8414, ..., 1.7993, 1.1740, 1.0609], [1.1740, 1.9631, 1.5938, ..., 1.1665, 1... [1.5190, 1.7778, 1.7393, ..., 1.7129, 1.9409, 1.0582]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype1-copy_lhs-e-u-shp1-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[1.9701, 1.7894, 1.3187, ..., 1.6066, 1.3179, 1.3830]], [[1.5272, 1.0412, 1.1507, ..., 1.4315,... [[1.2352, 1.6113, 1.9964, ..., 1.9928, 1.2116, 1.4515]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.1616, 1.5474, 1.2346, ..., 1.9076, 1.9521, 1.1645], [1.6608, 1.5260, 1.9431, ..., 1.0269, 1... 
[1.4955, 1.1688, 1.9228, ..., 1.3397, 1.4086, 1.8147]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype1-copy_lhs-e-u-shp2-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[1.1581, 1.5576, 1.1531], [1.4989, 1.8287, 1.0739], [1.6781, 1.2461, 1.4105]]], ...7], [1.9582, 1.4221, 1.0420], [1.5072, 1.0601, 1.9200]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.2365, 1.9025, 1.6727]], [[1.7989, 1.4046, 1.2815]], [[1.7886, 1.7697, 1.6580]], ... 
[[1.7329, 1.1433, 1.4419]], [[1.6142, 1.8687, 1.0488]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype1-copy_lhs-e-u-shp2-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[1.5994, 1.3044, 1.2601], [1.1898, 1.6585, 1.5091], [1.1382, 1.0728, 1.7889]]], ...1], [1.1673, 1.6966, 1.9452], [1.8129, 1.7478, 1.2764]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.6374, 1.1347, 1.0506]], [[1.0259, 1.2595, 1.3427]], [[1.9415, 1.1672, 1.7548]], ... 
[[1.1249, 1.1933, 1.2638]], [[1.7123, 1.8035, 1.4671]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype1-copy_lhs-e-u-shp3-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_lhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.1524, 1.7764, 1.2455], [1.8400, 1.5934, 1.5055], [1.6096, 1.3194, 1.1589], [1.6952,... 1.1816], [1.5138, 1.5859, 1.4181], [1.0022, 1.2448, 1.7641]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3579, 1.5256, 1.1661], [1.1946, 1.4070, 1.8829], [1.6965, 1.3512, 1.8653], [1.8575,... 
1.5090], [1.5951, 1.8295, 1.0938], [1.9126, 1.7937, 1.2962]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype1-copy_lhs-e-u-shp3-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_lhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.8771, 1.7487, 1.8288], [1.9405, 1.3970, 1.9049], [1.6163, 1.3027, 1.9998], [1.2615,... 1.1422], [1.1329, 1.8300, 1.6169], [1.7184, 1.8721, 1.2175]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.1439, 1.6657, 1.2080], [1.5668, 1.8912, 1.9410], [1.6489, 1.0878, 1.6488], [1.6279,... 
1.1961], [1.7581, 1.9509, 1.4490], [1.8712, 1.4019, 1.5435]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([300, 3]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype1-copy_lhs-e-u-shp4-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_lhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.4236], [1.3263], [1.6429], [1.4935], [1.7650], [1.9343], [1...080], [1.3809], [1.7107], [1.6608], [1.6767]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.6225], [1.7148], [1.4774], [1.3209], [1.1273], [1.8710], [1...986], [1.5104], [1.0052], [1.9681], [1.6338]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, 
lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype1-copy_lhs-e-u-shp4-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_lhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[1.9557], [1.8414], [1.7188], [1.6424], [1.5514], [1.6066], [1...559], [1.9973], [1.1468], [1.3689], [1.3196]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.0706], [1.0113], [1.9389], [1.2934], [1.0140], [1.0298], [1...700], [1.0576], [1.9075], [1.6596], [1.8565]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([300, 1]) SDDMM(message func: u_copy_lhs_e) ___________________ test_sddmm[idtype1-copy_lhs-e-v-shp0-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[[1.7965], [1.4236], [1.0504]]], [[[1.7718], [1.3539], ...0]]], [[[1.3637], [1.1902], [1.2667]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.1633]], [[1.5660]], [[1.3113]]]], [[[[1.9385]], [[1.011... [[[[1.7276]], [[1.4577]], [[1.3539]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: v_copy_lhs_e) ___________________ test_sddmm[idtype1-copy_lhs-e-v-shp0-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_lhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_lhs' lhs_data = tensor([[[[[[1.2883], [1.7912], [1.2649]]], [[[1.3669], [1.1166], ...7]]], [[[1.4534], [1.0721], [1.3245]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.9550]], [[1.7226]], [[1.0386]]]], [[[[1.5542]], [[1.078... [[[[1.7417]], [[1.0968]], [[1.0021]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
___________________ test_sddmm[idtype1-copy_lhs-e-v-shp1-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]); SDDMM(message func: v_copy_lhs_e)

___________________ test_sddmm[idtype1-copy_lhs-e-v-shp1-g1] ___________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]); SDDMM(message func: v_copy_lhs_e)

___________________ test_sddmm[idtype1-copy_lhs-e-v-shp2-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]); SDDMM(message func: v_copy_lhs_e)

___________________ test_sddmm[idtype1-copy_lhs-e-v-shp2-g1] ___________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]); SDDMM(message func: v_copy_lhs_e)

___________________ test_sddmm[idtype1-copy_lhs-e-v-shp3-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3]); SDDMM(message func: v_copy_lhs_e)

___________________ test_sddmm[idtype1-copy_lhs-e-v-shp3-g1] ___________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([300, 3]); SDDMM(message func: v_copy_lhs_e)
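[Editor's note] The lhs/rhs shapes in each captured stdout follow directly from the parametrization: the test prepends the node or edge count selected by lhs_target/rhs_target to the feature shape in shp. A standalone sketch of that arithmetic; this `select` is a hypothetical stand-in for the helper used in tests/compute/test_sparse.py, whose actual body is not shown in this log:

    def select(target, src_val, edge_val, dst_val):
        # Hypothetical stand-in: pick a value by target ('u'=src, 'e'=edge, 'v'=dst).
        return {'u': src_val, 'e': edge_val, 'v': dst_val}[target]

    num_src, num_edges, num_dst = 30, 100, 30   # the homogeneous test graph g0
    shp = ((3,), (3,))                          # shp3 in the test IDs
    lhs_shp = (select('v', num_src, num_edges, num_dst),) + shp[0]
    rhs_shp = (select('e', num_src, num_edges, num_dst),) + shp[1]
    print(lhs_shp, rhs_shp)   # (30, 3) (100, 3), matching "lhs shape: torch.Size([30, 3])"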
___________________ test_sddmm[idtype1-copy_lhs-e-v-shp4-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1]); SDDMM(message func: v_copy_lhs_e)

___________________ test_sddmm[idtype1-copy_lhs-e-v-shp4-g1] ___________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_lhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([300, 1]); SDDMM(message func: v_copy_lhs_e)

___________________ test_sddmm[idtype1-copy_rhs-u-v-shp0-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_rhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]); SDDMM(message func: v_copy_rhs_u)

___________________ test_sddmm[idtype1-copy_rhs-u-v-shp0-g1] ___________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_rhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1]); SDDMM(message func: v_copy_rhs_u)
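[Editor's note] The `_cast_if_autocast_enabled(...)` call followed by `with autocast(enabled=False):` in the traceback is the standard PyTorch recipe for custom ops under AMP: cast the inputs once while autocast is still active, then run the op with autocast disabled so its internals see one consistent dtype. A self-contained sketch of that recipe on recent PyTorch (1.10+, where torch.get_autocast_gpu_dtype exists); the helper name mirrors the traceback, but the body here is an assumption, not DGL's implementation:

    import torch

    def _cast_if_autocast_enabled(*args):
        # Cast floating-point tensors to the active autocast dtype; pass
        # everything else (graph indices, op names, targets) through unchanged.
        if not torch.is_autocast_enabled():
            return args
        dtype = torch.get_autocast_gpu_dtype()
        return tuple(
            a.to(dtype) if torch.is_tensor(a) and a.is_floating_point() else a
            for a in args)

    def fused_op(x, y):
        x, y = _cast_if_autocast_enabled(x, y)
        # Disable autocast inside so the kernel runs at the dtype chosen above.
        with torch.cuda.amp.autocast(enabled=False):
            return x * y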
___________________ test_sddmm[idtype1-copy_rhs-u-v-shp1-g0] ___________________
g = Graph(num_nodes=30, num_edges=100, ...), shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_rhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]); SDDMM(message func: v_copy_rhs_u)

___________________ test_sddmm[idtype1-copy_rhs-u-v-shp1-g1] ___________________
g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')])
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_rhs', idtype = torch.int64
E   TypeError: empty_context() got an unexpected keyword argument 'enabled'   (python/dgl/backend/pytorch/sparse.py:731)
Captured stdout: lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]); SDDMM(message func: v_copy_rhs_u)
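[Editor's note] The shape pairs being exercised here (e.g. lhs torch.Size([40, 5, 3, 1, 7]) against rhs torch.Size([30, 1, 3, 7, 7]) just above) check gsddmm's NumPy-style broadcasting over the feature dimensions; the leading node/edge dimension is matched through the graph structure rather than broadcast. A plain torch analogue of the feature-dimension broadcast for the shp0 case, for reference:

    import torch

    lhs_feat = torch.rand(1, 2, 1, 3, 1)   # per-node feature block (shp0, lhs)
    rhs_feat = torch.rand(4, 1, 3, 1, 1)   # per-edge feature block (shp0, rhs)
    out = lhs_feat * rhs_feat              # broadcasts to torch.Size([4, 2, 3, 3, 1])
    print(out.shape)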
[1.9117, 1.2729, 1.4320, ..., 1.4265, 1.6270, 1.8220]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7]) SDDMM(message func: v_copy_rhs_u) ___________________ test_sddmm[idtype1-copy_rhs-u-v-shp2-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.4188, 1.9769, 1.7908], [1.6082, 1.7355, 1.1373], [1.6330, 1.1088, 1.7386]]], ...7], [1.6349, 1.6788, 1.2188], [1.6931, 1.6234, 1.9384]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.4415, 1.6879, 1.8654]], [[1.1386, 1.8167, 1.7708]], [[1.1599, 1.1233, 1.0628]], ... 
[[1.7573, 1.6637, 1.2333]], [[1.3928, 1.0783, 1.6524]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: v_copy_rhs_u) ___________________ test_sddmm[idtype1-copy_rhs-u-v-shp2-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'u' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.2492, 1.3507, 1.0544], [1.5834, 1.4312, 1.0431], [1.0876, 1.9952, 1.5027]]], ...6], [1.3787, 1.0537, 1.1970], [1.9258, 1.6100, 1.1759]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.6347, 1.8895, 1.2946]], [[1.8939, 1.0732, 1.0343]], [[1.0201, 1.4766, 1.2948]], ... 
[[1.0581, 1.6616, 1.1952]], [[1.5389, 1.4259, 1.0294]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: v_copy_rhs_u) ___________________ test_sddmm[idtype1-copy_rhs-u-v-shp3-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_rhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.2420, 1.3425, 1.0938], [1.9123, 1.7822, 1.6531], [1.7166, 1.9094, 1.3112], [1.9534,... 1.3844], [1.1347, 1.6533, 1.2640], [1.6527, 1.5157, 1.5305]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.0392, 1.5213, 1.7219], [1.4467, 1.7178, 1.0755], [1.2275, 1.2936, 1.5568], [1.1621,... 
1.0614], [1.5292, 1.3066, 1.1765], [1.8768, 1.1567, 1.3539]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: v_copy_rhs_u) ___________________ test_sddmm[idtype1-copy_rhs-u-v-shp3-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_rhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.9762, 1.9818, 1.0645], [1.5738, 1.2004, 1.2534], [1.5774, 1.5576, 1.7900], [1.5631,... 1.9511], [1.6534, 1.7226, 1.8922], [1.3786, 1.5588, 1.6608]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3335, 1.5122, 1.6639], [1.8889, 1.7903, 1.4100], [1.5278, 1.4712, 1.1353], [1.4736,... 
1.5093], [1.0424, 1.9540, 1.7089], [1.6041, 1.1928, 1.0700]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([30, 3]) SDDMM(message func: v_copy_rhs_u) ___________________ test_sddmm[idtype1-copy_rhs-u-v-shp4-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_rhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.9662], [1.2567], [1.4651], [1.7389], [1.1952], [1.8174], [1...777], [1.7647], [1.6692], [1.0981], [1.7161]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7650], [1.8312], [1.2242], [1.4089], [1.3459], [1.5577], [1...709], [1.2251], [1.9662], [1.3730], [1.9566]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'u' def gsddmm(gidx, op, lhs_data, rhs_data, 
___________________ test_sddmm[idtype1-copy_rhs-u-v-shp4-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...)
shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'u', msg = 'copy_rhs'
idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([40, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: v_copy_rhs_u)
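The gsddmm wrapper shown in each traceback canonicalizes operators before dispatch: 'sub' becomes 'add' with a negated right operand, and 'div' becomes 'mul' with a reciprocal, so the backend only needs the add/mul family of kernels. A quick check of the identities it relies on:

    import torch

    lhs = torch.tensor([4.0, 9.0])
    rhs = torch.tensor([2.0, 3.0])

    # 'sub' lowered to 'add' with negated rhs
    assert torch.allclose(lhs - rhs, lhs + (-rhs))
    # 'div' lowered to 'mul' with reciprocal rhs
    assert torch.allclose(lhs / rhs, lhs * (1.0 / rhs))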
___________________ test_sddmm[idtype1-copy_rhs-u-e-shp0-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u'
msg = 'copy_rhs', idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_copy_rhs_u)
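The shp0 pair above, ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), deliberately mismatches so the binary message ops exercise broadcasting; for copy_rhs the output simply takes the rhs feature shape. Assuming standard NumPy-style broadcasting over the feature dimensions, the per-edge result shape for the binary ops would be:

    import numpy as np

    lhs_feat = (1, 2, 1, 3, 1)   # feature dims of the lhs operand in shp0
    rhs_feat = (4, 1, 3, 1, 1)   # feature dims of the rhs operand in shp0

    # each pair of dims is broadcast elementwise, right-aligned
    print(np.broadcast_shapes(lhs_feat, rhs_feat))   # (4, 2, 3, 3, 1)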
___________________ test_sddmm[idtype1-copy_rhs-u-e-shp0-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...)
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'e', rhs_target = 'u'
msg = 'copy_rhs', idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_copy_rhs_u)
___________________ test_sddmm[idtype1-copy_rhs-u-e-shp1-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u'
msg = 'copy_rhs', idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: e_copy_rhs_u)
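select is defined elsewhere in tests/compute/test_sparse.py; the call sites in the test body shown above pass it a target code plus the source-node, edge, and destination-node counts, in that order. A minimal stand-in consistent with those calls (the real implementation may differ):

    def select(target, src_val, edge_val, dst_val):
        # 'u' -> source nodes, 'e' -> edges, 'v' -> destination nodes
        return {'u': src_val, 'e': edge_val, 'v': dst_val}[target]

    # for g1 above (30 source nodes, 300 edges, 40 destination nodes):
    assert select('e', 30, 300, 40) == 300
    assert select('v', 30, 300, 40) == 40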
___________________ test_sddmm[idtype1-copy_rhs-u-e-shp1-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...)
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'e', rhs_target = 'u'
msg = 'copy_rhs', idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: e_copy_rhs_u)
___________________ test_sddmm[idtype1-copy_rhs-u-e-shp2-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u'
msg = 'copy_rhs', idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: e_copy_rhs_u)
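_cast_if_autocast_enabled followed by autocast(enabled=False) is the usual recipe for custom ops under torch AMP: cast the floating-point inputs to the autocast dtype once up front, then run the kernel with autocast switched off so nothing inside is re-cast. A sketch of that recipe, not DGL's actual implementation:

    import torch

    def _cast_if_autocast_enabled(*args):
        if not torch.is_autocast_enabled():
            return args
        dtype = torch.get_autocast_gpu_dtype()   # e.g. torch.float16
        return tuple(
            a.to(dtype) if torch.is_tensor(a) and a.is_floating_point() else a
            for a in args)

    def custom_op(x, y):
        x, y = _cast_if_autocast_enabled(x, y)
        with torch.cuda.amp.autocast(enabled=False):  # kernel sees final dtypes
            return x + y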
___________________ test_sddmm[idtype1-copy_rhs-u-e-shp2-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...)
shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'e', rhs_target = 'u'
msg = 'copy_rhs', idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: e_copy_rhs_u)
___________________ test_sddmm[idtype1-copy_rhs-u-e-shp3-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'copy_rhs'
idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: e_copy_rhs_u)
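For scale, judging from the decorators and the test IDs (graphs g0/g1, shapes shp0 through shp4, idtype0/idtype1), the parametrize stack expands to a grid like the following; the counts for graphs, shapes, and idtypes are inferred from the IDs rather than stated in the log:

    graphs, shapes, targets, msgs, idtypes = 2, 5, 3, 7, 2

    total = graphs * shapes * targets * targets * msgs * idtypes
    # lhs_target == rhs_target cases return early (3 of the 9 target pairs)
    executed = graphs * shapes * (targets * targets - targets) * msgs * idtypes
    print(total, executed)   # 1260 840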
___________________ test_sddmm[idtype1-copy_rhs-u-e-shp3-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...)
shp = ((3,), (3,)), lhs_target = 'e', rhs_target = 'u', msg = 'copy_rhs'
idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: e_copy_rhs_u)
___________________ test_sddmm[idtype1-copy_rhs-u-e-shp4-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'copy_rhs'
idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: e_copy_rhs_u)
___________________ test_sddmm[idtype1-copy_rhs-u-e-shp4-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...)
shp = ((1,), (1,)), lhs_target = 'e', rhs_target = 'u', msg = 'copy_rhs'
idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: e_copy_rhs_u)
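What these cases were about to assert: for msg='copy_rhs' with rhs_target='u', the per-edge result is just the source node's rhs feature, so the reference value is an index into the rhs tensor by edge source id. A toy version of that check (graph data here is made up):

    import torch

    src = torch.tensor([0, 2, 1])              # edge source node ids
    rhs = torch.tensor([[1.0], [2.0], [3.0]])  # one rhs feature row per node

    expected = rhs[src]    # copy_rhs: gather rhs rows edge-wise
    print(expected)        # tensor([[1.], [3.], [2.]])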
___________________ test_sddmm[idtype1-copy_rhs-v-u-shp0-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v'
msg = 'copy_rhs', idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: u_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-u-shp0-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...)
shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'u', rhs_target = 'v'
msg = 'copy_rhs', idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1])
SDDMM(message func: u_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-u-shp1-g0] ___________________

g = Graph(num_nodes=30, num_edges=100, ...)
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v'
msg = 'copy_rhs', idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: u_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-u-shp1-g1] ___________________

g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, ...)
shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'u', rhs_target = 'v'
msg = 'copy_rhs', idtype = torch.int64
(same test body and gsddmm frame as above)
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])
SDDMM(message func: u_copy_rhs_v)
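The test body is written against DGL's backend-agnostic F module so the same file runs under PyTorch, MXNet, and TensorFlow. Rough PyTorch-flavoured equivalents of the wrappers it uses (the real ones live in dgl.backend; these are guesses for orientation only):

    import contextlib
    import torch

    def tensor(data):          # F.tensor
        return torch.as_tensor(data)

    def attach_grad(x):        # F.attach_grad: enable autograd on x
        return x.requires_grad_()

    def clone(x):              # F.clone
        return x.clone()

    record_grad = contextlib.nullcontext  # F.record_grad: PyTorch records by default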
[1.8831, 1.5544, 1.6347, ..., 1.2754, 1.9061, 1.6695]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7]) SDDMM(message func: u_copy_rhs_v) ___________________ test_sddmm[idtype1-copy_rhs-v-u-shp2-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.9125, 1.9425, 1.1205], [1.8770, 1.7751, 1.9139], [1.4841, 1.8045, 1.3477]]], ...6], [1.2569, 1.9663, 1.8199], [1.7656, 1.7314, 1.6401]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.7372, 1.3161, 1.4013]], [[1.6525, 1.3166, 1.1082]], [[1.6602, 1.3911, 1.1412]], ... 
[[1.0177, 1.2549, 1.8863]], [[1.1960, 1.1550, 1.5574]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'v' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3]) SDDMM(message func: u_copy_rhs_v) ___________________ test_sddmm[idtype1-copy_rhs-v-u-shp2-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'v' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.6892, 1.4718, 1.5288], [1.1747, 1.1061, 1.1128], [1.2894, 1.6658, 1.0061]]], ...7], [1.2705, 1.4962, 1.2117], [1.5349, 1.6612, 1.7823]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.8535, 1.8309, 1.0402]], [[1.3840, 1.5707, 1.6884]], [[1.8716, 1.6700, 1.9982]], ... 
___________________ test_sddmm[idtype1-copy_rhs-v-u-shp2-g1] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3])
SDDMM(message func: u_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-u-shp3-g0] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: u_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-u-shp3-g1] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([40, 3])
SDDMM(message func: u_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-u-shp4-g0] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: u_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-u-shp4-g1] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([40, 1])
SDDMM(message func: u_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-e-shp0-g0] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 1, 2, 1, 3, 1]), rhs shape: torch.Size([30, 4, 1, 3, 1, 1])
SDDMM(message func: e_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-e-shp0-g1] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 1, 2, 1, 3, 1]), rhs shape: torch.Size([40, 4, 1, 3, 1, 1])
SDDMM(message func: e_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-e-shp1-g0] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 5, 3, 1, 7]), rhs shape: torch.Size([30, 1, 3, 7, 7])
SDDMM(message func: e_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-e-shp1-g1] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 5, 3, 1, 7]), rhs shape: torch.Size([40, 1, 3, 7, 7])
SDDMM(message func: e_copy_rhs_v)
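A side note on the gsddmm wrapper quoted in each traceback: before dispatch it
rewrites 'sub' as 'add' with a negated right operand and 'div' as 'mul' with a
reciprocal right operand, so the backend only needs kernels for 'add' and 'mul'
out of these four ops. A standalone sketch of that normalization (illustrative
only; the helper name normalize_op is invented here):

    def normalize_op(op, rhs_data):
        # a - b == a + (-b);  a / b == a * (1 / b)
        if op == 'sub':
            op, rhs_data = 'add', -rhs_data
        if op == 'div':
            op, rhs_data = 'mul', 1. / rhs_data
        return op, rhs_data

    print(normalize_op('div', 4.0))  # ('mul', 0.25), i.e. u / v == u * (1/v)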
___________________ test_sddmm[idtype1-copy_rhs-v-e-shp2-g0] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 1, 3, 3]), rhs shape: torch.Size([30, 4, 1, 3])
SDDMM(message func: e_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-e-shp2-g1] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 1, 3, 3]), rhs shape: torch.Size([40, 4, 1, 3])
SDDMM(message func: e_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-e-shp3-g0] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 3]), rhs shape: torch.Size([30, 3])
SDDMM(message func: e_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-e-shp3-g1] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 3]), rhs shape: torch.Size([40, 3])
SDDMM(message func: e_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-e-shp4-g0] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([100, 1]), rhs shape: torch.Size([30, 1])
SDDMM(message func: e_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-v-e-shp4-g1] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([300, 1]), rhs shape: torch.Size([40, 1])
SDDMM(message func: e_copy_rhs_v)
___________________ test_sddmm[idtype1-copy_rhs-e-u-shp0-g0] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1])
SDDMM(message func: u_copy_rhs_e)
___________________ test_sddmm[idtype1-copy_rhs-e-u-shp0-g1] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1])
SDDMM(message func: u_copy_rhs_e)
___________________ test_sddmm[idtype1-copy_rhs-e-u-shp1-g0] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7])
SDDMM(message func: u_copy_rhs_e)
___________________ test_sddmm[idtype1-copy_rhs-e-u-shp1-g1] ___________________
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'
python/dgl/backend/pytorch/sparse.py:731: TypeError
lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7])
SDDMM(message func: u_copy_rhs_e)
[1.5417, 1.3774, 1.1831, ..., 1.0114, 1.3820, 1.8008]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: u_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-u-shp2-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.0862, 1.5947, 1.9391], [1.9296, 1.8309, 1.4733], [1.9755, 1.1658, 1.9348]]], ...2], [1.1396, 1.2926, 1.4690], [1.8883, 1.0083, 1.5965]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.1449, 1.9681, 1.3970]], [[1.9209, 1.8942, 1.8088]], [[1.5332, 1.9072, 1.4435]], ... 
[[1.3262, 1.6057, 1.3069]], [[1.0330, 1.0424, 1.1016]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: u_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-u-shp2-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'u', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.9025, 1.4085, 1.8118], [1.6018, 1.2329, 1.9827], [1.1081, 1.6972, 1.9571]]], ...7], [1.3857, 1.1395, 1.5252], [1.4621, 1.2023, 1.2420]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.5660, 1.7580, 1.6765]], [[1.5567, 1.2891, 1.3040]], [[1.4994, 1.0405, 1.9391]], ... 
[[1.8702, 1.4696, 1.1758]], [[1.2114, 1.6079, 1.2159]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]) SDDMM(message func: u_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-u-shp3-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_rhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.3189, 1.1416, 1.6804], [1.0114, 1.8601, 1.0906], [1.6158, 1.2644, 1.9614], [1.5814,... 1.5004], [1.6899, 1.3111, 1.1417], [1.9739, 1.8635, 1.4802]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8650, 1.0127, 1.2948], [1.9417, 1.5314, 1.9632], [1.5745, 1.7152, 1.2097], [1.2700,... 
1.5164], [1.2597, 1.4262, 1.8704], [1.9379, 1.8416, 1.3589]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3]) SDDMM(message func: u_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-u-shp3-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_rhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.4449, 1.5294, 1.3899], [1.0322, 1.9397, 1.0844], [1.6004, 1.5091, 1.6544], [1.5420,... 1.3438], [1.9588, 1.4960, 1.2632], [1.6547, 1.2876, 1.2955]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7808, 1.9129, 1.8497], [1.4977, 1.7720, 1.8779], [1.1004, 1.5152, 1.1619], [1.0339,... 
1.1049], [1.5561, 1.3637, 1.1545], [1.7710, 1.3495, 1.3663]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([300, 3]) SDDMM(message func: u_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-u-shp4-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_rhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.4225], [1.7946], [1.1119], [1.1458], [1.8793], [1.5906], [1...385], [1.5711], [1.1216], [1.9969], [1.2157]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.0313], [1.9611], [1.5375], [1.5047], [1.0283], [1.8949], [1...917], [1.5854], [1.4629], [1.5031], [1.6580]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, 
lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1]) SDDMM(message func: u_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-u-shp4-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'u', rhs_target = 'e', msg = 'copy_rhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.8480], [1.3502], [1.1393], [1.0587], [1.9366], [1.8910], [1...100], [1.0277], [1.9124], [1.2885], [1.6785]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3557], [1.5511], [1.9906], [1.6949], [1.6239], [1.6512], [1...219], [1.7536], [1.0172], [1.3477], [1.5440]], dtype=torch.float64, requires_grad=True) lhs_target = 'u', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([300, 1]) SDDMM(message func: u_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-v-shp0-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': ...torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[[1.4433], [1.9211], [1.2812]]], [[[1.3511], [1.8356], ...2]]], [[[1.7621], [1.9022], [1.5878]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.7974]], [[1.6621]], [[1.4124]]]], [[[[1.2558]], [[1.650... [[[[1.7451]], [[1.5954]], [[1.2952]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 2, 1, 3, 1]), rhs shape: torch.Size([100, 4, 1, 3, 1, 1]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-v-shp0-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 2, 1, 3, 1), (4, 1, 3, 1, 1)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[[1.8300], [1.3979], [1.1178]]], [[[1.2207], [1.7805], ...9]]], [[[1.1486], [1.1917], [1.5695]]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[[1.8957]], [[1.5145]], [[1.3299]]]], [[[[1.7190]], [[1.777... [[[[1.8994]], [[1.8122]], [[1.6836]]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1, 2, 1, 3, 1]), rhs shape: torch.Size([300, 4, 1, 3, 1, 1]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-v-shp1-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Sch...pe=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[1.5327, 1.9880, 1.3726, ..., 1.9888, 1.8943, 1.5037]], [[1.5743, 1.1922, 1.5087, ..., 1.6350,... [[1.6179, 1.1439, 1.7235, ..., 1.9702, 1.5892, 1.3810]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.0775, 1.3257, 1.6654, ..., 1.3174, 1.2627, 1.9638], [1.0101, 1.9184, 1.3939, ..., 1.5252, 1... [1.7335, 1.8750, 1.7117, ..., 1.7456, 1.2397, 1.7010]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
/ rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 2, 1, 3, 1), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3, 1, 1), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 5, 3, 1, 7]), rhs shape: torch.Size([100, 1, 3, 7, 7]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-v-shp1-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((5, 3, 1, 7), (1, 3, 7, 7)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[[1.9374, 1.9616, 1.1744, ..., 1.4169, 1.3948, 1.7673]], [[1.0227, 1.5676, 1.2092, ..., 1.4458,... [[1.4234, 1.0770, 1.3745, ..., 1.3638, 1.7544, 1.5819]]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[[1.1519, 1.3228, 1.3466, ..., 1.6335, 1.1371, 1.0469], [1.1579, 1.0918, 1.7194, ..., 1.3101, 1... 
[1.1575, 1.0498, 1.6135, ..., 1.0010, 1.4324, 1.4437]]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 5, 3, 1, 7]), rhs shape: torch.Size([300, 1, 3, 7, 7]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-v-shp2-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme...dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.8103, 1.4579, 1.2261], [1.7490, 1.7044, 1.5434], [1.8802, 1.2330, 1.7154]]], ...0], [1.2581, 1.0961, 1.5376], [1.1816, 1.5752, 1.9188]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.3789, 1.5061, 1.3105]], [[1.9299, 1.0755, 1.9331]], [[1.4375, 1.8366, 1.2557]], ... 
[[1.0134, 1.8976, 1.7609]], [[1.0335, 1.5703, 1.0897]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(5, 3, 1, 7), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1, 3, 7, 7), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1, 3, 3]), rhs shape: torch.Size([100, 4, 1, 3]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-v-shp2-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1, 3, 3), (4, 1, 3)), lhs_target = 'v', rhs_target = 'e' msg = 'copy_rhs', idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[[[1.8413, 1.4241, 1.1501], [1.3339, 1.9850, 1.1396], [1.3381, 1.3118, 1.9018]]], ...2], [1.1101, 1.6047, 1.3562], [1.6221, 1.4148, 1.9604]]]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[[[1.6795, 1.9798, 1.0570]], [[1.3804, 1.4328, 1.8370]], [[1.3010, 1.6446, 1.6242]], ... 
[[1.9671, 1.3054, 1.7572]], [[1.5429, 1.3686, 1.1859]]]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 1, 3, 3]), rhs shape: torch.Size([300, 4, 1, 3]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-v-shp3-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_rhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.7812, 1.8957, 1.2217], [1.8328, 1.3363, 1.7436], [1.6988, 1.1592, 1.6486], [1.4804,... 1.5552], [1.9987, 1.9963, 1.4032], [1.5138, 1.9576, 1.9523]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8989, 1.8609, 1.7037], [1.7855, 1.0936, 1.8359], [1.9846, 1.1933, 1.6315], [1.5431,... 
1.0257], [1.1509, 1.9224, 1.1748], [1.7452, 1.2552, 1.5176]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1, 3, 3), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(4, 1, 3), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 3]), rhs shape: torch.Size([100, 3]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-v-shp3-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((3,), (3,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_rhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.4051, 1.2752, 1.0507], [1.1362, 1.5415, 1.0611], [1.8419, 1.1748, 1.3601], [1.6928,... 1.9858], [1.0749, 1.7624, 1.3769], [1.2872, 1.7262, 1.1155]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.7797, 1.0596, 1.9202], [1.3271, 1.9990, 1.8196], [1.0188, 1.9803, 1.8149], [1.6953,... 
1.1374], [1.6073, 1.3805, 1.9170], [1.5963, 1.8923, 1.0916]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) torch.int64 lhs shape: torch.Size([40, 3]), rhs shape: torch.Size([300, 3]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-v-shp4-g0] ___________________ g = Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shap...=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)}) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_rhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.5791], [1.3981], [1.5732], [1.9094], [1.7909], [1.1953], [1...656], [1.7553], [1.5537], [1.4973], [1.6416]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.8651], [1.5651], [1.2820], [1.6269], [1.1079], [1.3281], [1...865], [1.9662], [1.8719], [1.2008], [1.8400]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, 
lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. / rhs_data args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:731: TypeError ----------------------------- Captured stdout call ----------------------------- Graph(num_nodes=30, num_edges=100, ndata_schemes={'x': Scheme(shape=(3,), dtype=torch.float64), 'y': Scheme(shape=(1,), dtype=torch.float64)} edata_schemes={'w': Scheme(shape=(), dtype=torch.float64), 'x': Scheme(shape=(1,), dtype=torch.float64), 'y': Scheme(shape=(3,), dtype=torch.float64)}) torch.int64 lhs shape: torch.Size([30, 1]), rhs shape: torch.Size([100, 1]) SDDMM(message func: v_copy_rhs_e) ___________________ test_sddmm[idtype1-copy_rhs-e-v-shp4-g1] ___________________ g = Graph(num_nodes={'_U': 30, '_V': 40}, num_edges={('_U', '_E', '_V'): 300}, metagraph=[('_U', '_V', '_E')]) shp = ((1,), (1,)), lhs_target = 'v', rhs_target = 'e', msg = 'copy_rhs' idtype = torch.int64 @pytest.mark.parametrize('g', graphs) @pytest.mark.parametrize('shp', sddmm_shapes) @pytest.mark.parametrize('lhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('rhs_target', ['u', 'v', 'e']) @pytest.mark.parametrize('msg', ['add', 'sub', 'mul', 'div', 'dot', 'copy_lhs', 'copy_rhs']) @parametrize_idtype def test_sddmm(g, shp, lhs_target, rhs_target, msg, idtype): if lhs_target == rhs_target: return g = g.astype(idtype).to(F.ctx()) if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0: pytest.skip() # mxnet do not support zero shape tensor print(g) print(g.idtype) len_lhs = select( lhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) lhs_shp = (len_lhs,) + shp[0] len_rhs = select( rhs_target, g.number_of_src_nodes(), g.number_of_edges(), g.number_of_dst_nodes()) rhs_shp = (len_rhs,) + shp[1] feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1) feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1) print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs), F.shape(feat_rhs))) lhs_frame = select( lhs_target, g.srcdata, g.edata, g.dstdata) rhs_frame = select( rhs_target, g.srcdata, g.edata, g.dstdata) lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs)) rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs)) msg_func = lhs_target + '_' + msg + '_' + rhs_target print('SDDMM(message func: {})'.format(msg_func)) lhs = F.attach_grad(F.clone(feat_lhs)) rhs = F.attach_grad(F.clone(feat_rhs)) with F.record_grad(): > e = gsddmm(g, msg, lhs, rhs, lhs_target=lhs_target, rhs_target=rhs_target) tests/compute/test_sparse.py:207: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/sddmm.py:75: in gsddmm g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'copy_rhs' lhs_data = tensor([[1.3214], [1.7330], [1.2684], [1.6351], [1.7854], [1.0336], [1...414], [1.4728], [1.4876], [1.7324], [1.9385]], dtype=torch.float64, requires_grad=True) rhs_data = tensor([[1.3953], [1.6293], [1.0898], [1.0729], [1.0595], [1.8069], [1...907], [1.4576], [1.2714], [1.5366], [1.3968]], dtype=torch.float64, requires_grad=True) lhs_target = 'v', rhs_target = 'e' def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
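Every failure above is the same crash repeated: _cast_if_autocast_enabled(...) builds an argument tuple, and the very next statement, with autocast(enabled=False):, raises before any graph kernel runs, because the name autocast is bound not to torch's autocast but to a zero-argument stand-in named empty_context. The sketch below reproduces that failure mode; it is an assumption-laden reconstruction (the traceback never shows empty_context's definition), and only the names autocast and empty_context and the enabled=False call are taken from the log:

    from contextlib import contextmanager

    @contextmanager
    def empty_context():
        # Assumed fallback: a do-nothing context manager presumably bound
        # to the name `autocast` when the running PyTorch lacks the
        # autocast API this code path expects.
        yield

    autocast = empty_context

    # The call site passes a keyword that only torch's real autocast
    # accepts; calling the zero-argument fallback with it reproduces the
    # exact error in the log above.
    try:
        with autocast(enabled=False):
            pass
    except TypeError as err:
        print(err)  # empty_context() got an unexpected keyword argument 'enabled'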
____________________ test_edge_softmax[idtype0-shp0-src-g0] ____________________

g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={})
norm_by = 'src', shp = (1,), idtype = torch.int32

    @pytest.mark.parametrize('g', get_cases(['clique']))
    @pytest.mark.parametrize('norm_by', ['src', 'dst'])
    @pytest.mark.parametrize('shp', edge_softmax_shapes)
    @parametrize_idtype
    def test_edge_softmax(g, norm_by, shp, idtype):
        g = g.astype(idtype).to(F.ctx())
        edata = F.tensor(np.random.rand(g.number_of_edges(), *shp))
        e1 = F.attach_grad(F.clone(edata))
        with F.record_grad():
>           score1 = edge_softmax(g, e1, norm_by=norm_by)

tests/compute/test_sparse.py:240:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/edge_softmax.py:135: in edge_softmax
    eids=eids, norm_by=norm_by)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
logits = tensor([[0.0753], [0.8055], [0.8982], [0.0914], [0.3260], [0.6156], [0.2587], [0.9070], [0.5179]], dtype=torch.float64, requires_grad=True)
eids = '__ALL__', norm_by = 'src'

    def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'):
        args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:770: TypeError
____________________ test_edge_softmax[idtype0-shp0-dst-g0] ____________________

g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={})
norm_by = 'dst', shp = (1,), idtype = torch.int32

    @pytest.mark.parametrize('g', get_cases(['clique']))
    @pytest.mark.parametrize('norm_by', ['src', 'dst'])
    @pytest.mark.parametrize('shp', edge_softmax_shapes)
    @parametrize_idtype
    def test_edge_softmax(g, norm_by, shp, idtype):
        g = g.astype(idtype).to(F.ctx())
        edata = F.tensor(np.random.rand(g.number_of_edges(), *shp))
        e1 = F.attach_grad(F.clone(edata))
        with F.record_grad():
>           score1 = edge_softmax(g, e1, norm_by=norm_by)

tests/compute/test_sparse.py:240:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/edge_softmax.py:135: in edge_softmax
    eids=eids, norm_by=norm_by)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
logits = tensor([[0.5736], [0.3749], [0.6673], [0.4789], [0.1116], [0.1633], [0.0682], [0.7571], [0.5223]], dtype=torch.float64, requires_grad=True)
eids = '__ALL__', norm_by = 'dst'

    def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'):
        args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:770: TypeError
____________________ test_edge_softmax[idtype0-shp1-src-g0] ____________________

g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={})
norm_by = 'src', shp = (1, 3), idtype = torch.int32

    @pytest.mark.parametrize('g', get_cases(['clique']))
@pytest.mark.parametrize('norm_by', ['src', 'dst']) @pytest.mark.parametrize('shp', edge_softmax_shapes) @parametrize_idtype def test_edge_softmax(g, norm_by, shp, idtype): g = g.astype(idtype).to(F.ctx()) edata = F.tensor(np.random.rand(g.number_of_edges(), *shp)) e1 = F.attach_grad(F.clone(edata)) with F.record_grad(): > score1 = edge_softmax(g, e1, norm_by=norm_by) tests/compute/test_sparse.py:240: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/edge_softmax.py:135: in edge_softmax eids=eids, norm_by=norm_by) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = logits = tensor([[[0.6629, 0.3666, 0.2642]], [[0.6724, 0.8719, 0.7598]], [[0.7094, 0.3214, 0.3994]], ...]], [[0.5871, 0.8898, 0.1532]], [[0.2140, 0.0463, 0.5087]]], dtype=torch.float64, requires_grad=True) eids = '__ALL__', norm_by = 'src' def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'): args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:770: TypeError ____________________ test_edge_softmax[idtype0-shp1-dst-g0] ____________________ g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={}) norm_by = 'dst', shp = (1, 3), idtype = torch.int32 @pytest.mark.parametrize('g', get_cases(['clique'])) @pytest.mark.parametrize('norm_by', ['src', 'dst']) @pytest.mark.parametrize('shp', edge_softmax_shapes) @parametrize_idtype def test_edge_softmax(g, norm_by, shp, idtype): g = g.astype(idtype).to(F.ctx()) edata = F.tensor(np.random.rand(g.number_of_edges(), *shp)) e1 = F.attach_grad(F.clone(edata)) with F.record_grad(): > score1 = edge_softmax(g, e1, norm_by=norm_by) tests/compute/test_sparse.py:240: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/edge_softmax.py:135: in edge_softmax eids=eids, norm_by=norm_by) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = logits = tensor([[[0.9109, 0.4017, 0.8143]], [[0.1241, 0.5949, 0.0951]], [[0.4755, 0.2490, 0.2331]], ...]], [[0.4047, 0.4939, 0.9638]], [[0.8521, 0.0064, 0.9972]]], dtype=torch.float64, requires_grad=True) eids = '__ALL__', norm_by = 'dst' def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'): args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:770: TypeError ____________________ test_edge_softmax[idtype0-shp2-src-g0] ____________________ g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={}) norm_by = 'src', shp = (3, 4, 5), idtype = torch.int32 @pytest.mark.parametrize('g', get_cases(['clique'])) @pytest.mark.parametrize('norm_by', ['src', 'dst']) @pytest.mark.parametrize('shp', edge_softmax_shapes) @parametrize_idtype def test_edge_softmax(g, norm_by, shp, idtype): g = g.astype(idtype).to(F.ctx()) edata = F.tensor(np.random.rand(g.number_of_edges(), *shp)) e1 = F.attach_grad(F.clone(edata)) with F.record_grad(): > score1 = edge_softmax(g, e1, norm_by=norm_by) tests/compute/test_sparse.py:240: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/edge_softmax.py:135: in edge_softmax eids=eids, norm_by=norm_by) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = logits = tensor([[[[2.3568e-01, 3.1214e-01, 
6.2320e-01, 9.0590e-02, 1.1290e-01], [5.4334e-01, 5.5248e-01, 8.9387e-01,... [2.0251e-01, 3.4976e-01, 1.0666e-01, 5.7245e-01, 8.2016e-01]]]], dtype=torch.float64, requires_grad=True) eids = '__ALL__', norm_by = 'src' def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'): args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:770: TypeError ____________________ test_edge_softmax[idtype0-shp2-dst-g0] ____________________ g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={}) norm_by = 'dst', shp = (3, 4, 5), idtype = torch.int32 @pytest.mark.parametrize('g', get_cases(['clique'])) @pytest.mark.parametrize('norm_by', ['src', 'dst']) @pytest.mark.parametrize('shp', edge_softmax_shapes) @parametrize_idtype def test_edge_softmax(g, norm_by, shp, idtype): g = g.astype(idtype).to(F.ctx()) edata = F.tensor(np.random.rand(g.number_of_edges(), *shp)) e1 = F.attach_grad(F.clone(edata)) with F.record_grad(): > score1 = edge_softmax(g, e1, norm_by=norm_by) tests/compute/test_sparse.py:240: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/edge_softmax.py:135: in edge_softmax eids=eids, norm_by=norm_by) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = logits = tensor([[[[0.8553, 0.1891, 0.1369, 0.2803, 0.0448], [0.4629, 0.1602, 0.5088, 0.5059, 0.6377], [0.8...0.6237, 0.5539], [0.8131, 0.7173, 0.0011, 0.5368, 0.5062]]]], dtype=torch.float64, requires_grad=True) eids = '__ALL__', norm_by = 'dst' def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'): args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:770: TypeError ____________________ test_edge_softmax[idtype1-shp0-src-g0] ____________________ g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={}) norm_by = 'src', shp = (1,), idtype = torch.int64 @pytest.mark.parametrize('g', get_cases(['clique'])) @pytest.mark.parametrize('norm_by', ['src', 'dst']) @pytest.mark.parametrize('shp', edge_softmax_shapes) @parametrize_idtype def test_edge_softmax(g, norm_by, shp, idtype): g = g.astype(idtype).to(F.ctx()) edata = F.tensor(np.random.rand(g.number_of_edges(), *shp)) e1 = F.attach_grad(F.clone(edata)) with F.record_grad(): > score1 = edge_softmax(g, e1, norm_by=norm_by) tests/compute/test_sparse.py:240: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/edge_softmax.py:135: in edge_softmax eids=eids, norm_by=norm_by) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = logits = tensor([[0.8406], [0.1745], [0.3345], [0.3646], [0.8309], [0.6845], [0.4655], [0.4576], [0.9292]], dtype=torch.float64, requires_grad=True) eids = '__ALL__', norm_by = 'src' def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'): args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:770: TypeError ____________________ test_edge_softmax[idtype1-shp0-dst-g0] ____________________ g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={}) norm_by = 'dst', shp = (1,), idtype = torch.int64 @pytest.mark.parametrize('g', get_cases(['clique'])) 
@pytest.mark.parametrize('norm_by', ['src', 'dst']) @pytest.mark.parametrize('shp', edge_softmax_shapes) @parametrize_idtype def test_edge_softmax(g, norm_by, shp, idtype): g = g.astype(idtype).to(F.ctx()) edata = F.tensor(np.random.rand(g.number_of_edges(), *shp)) e1 = F.attach_grad(F.clone(edata)) with F.record_grad(): > score1 = edge_softmax(g, e1, norm_by=norm_by) tests/compute/test_sparse.py:240: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/edge_softmax.py:135: in edge_softmax eids=eids, norm_by=norm_by) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = logits = tensor([[0.8463], [0.7901], [0.9339], [0.8135], [0.9437], [0.2803], [0.2323], [0.1038], [0.3317]], dtype=torch.float64, requires_grad=True) eids = '__ALL__', norm_by = 'dst' def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'): args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:770: TypeError ____________________ test_edge_softmax[idtype1-shp1-src-g0] ____________________ g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={}) norm_by = 'src', shp = (1, 3), idtype = torch.int64 @pytest.mark.parametrize('g', get_cases(['clique'])) @pytest.mark.parametrize('norm_by', ['src', 'dst']) @pytest.mark.parametrize('shp', edge_softmax_shapes) @parametrize_idtype def test_edge_softmax(g, norm_by, shp, idtype): g = g.astype(idtype).to(F.ctx()) edata = F.tensor(np.random.rand(g.number_of_edges(), *shp)) e1 = F.attach_grad(F.clone(edata)) with F.record_grad(): > score1 = edge_softmax(g, e1, norm_by=norm_by) tests/compute/test_sparse.py:240: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/edge_softmax.py:135: in edge_softmax eids=eids, norm_by=norm_by) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = logits = tensor([[[0.6063, 0.7308, 0.0134]], [[0.5140, 0.2919, 0.0165]], [[0.1430, 0.6988, 0.0289]], ...]], [[0.6512, 0.7367, 0.2710]], [[0.7152, 0.4692, 0.2998]]], dtype=torch.float64, requires_grad=True) eids = '__ALL__', norm_by = 'src' def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'): args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:770: TypeError ____________________ test_edge_softmax[idtype1-shp1-dst-g0] ____________________ g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={}) norm_by = 'dst', shp = (1, 3), idtype = torch.int64 @pytest.mark.parametrize('g', get_cases(['clique'])) @pytest.mark.parametrize('norm_by', ['src', 'dst']) @pytest.mark.parametrize('shp', edge_softmax_shapes) @parametrize_idtype def test_edge_softmax(g, norm_by, shp, idtype): g = g.astype(idtype).to(F.ctx()) edata = F.tensor(np.random.rand(g.number_of_edges(), *shp)) e1 = F.attach_grad(F.clone(edata)) with F.record_grad(): > score1 = edge_softmax(g, e1, norm_by=norm_by) tests/compute/test_sparse.py:240: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/edge_softmax.py:135: in edge_softmax eids=eids, norm_by=norm_by) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = logits = tensor([[[0.8018, 0.2317, 0.2448]], [[0.5826, 0.9925, 0.3086]], [[0.9497, 0.0042, 0.9720]], ...]], 
[[0.3193, 0.1606, 0.5287]], [[0.3239, 0.7866, 0.1637]]], dtype=torch.float64, requires_grad=True)
eids = '__ALL__', norm_by = 'dst'

    def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'):
        args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:770: TypeError
____________________ test_edge_softmax[idtype1-shp2-src-g0] ____________________

g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={})
norm_by = 'src', shp = (3, 4, 5), idtype = torch.int64

    @pytest.mark.parametrize('g', get_cases(['clique']))
    @pytest.mark.parametrize('norm_by', ['src', 'dst'])
    @pytest.mark.parametrize('shp', edge_softmax_shapes)
    @parametrize_idtype
    def test_edge_softmax(g, norm_by, shp, idtype):
        g = g.astype(idtype).to(F.ctx())
        edata = F.tensor(np.random.rand(g.number_of_edges(), *shp))
        e1 = F.attach_grad(F.clone(edata))
        with F.record_grad():
>           score1 = edge_softmax(g, e1, norm_by=norm_by)

tests/compute/test_sparse.py:240:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/edge_softmax.py:135: in edge_softmax
    eids=eids, norm_by=norm_by)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
logits = tensor([[[[0.7391, 0.7704, 0.6141, 0.9523, 0.4700], [0.7583, 0.1576, 0.5015, 0.5538, 0.9407], [0.2...0.3769, 0.2626], [0.6082, 0.1104, 0.3483, 0.0534, 0.1117]]]], dtype=torch.float64, requires_grad=True)
eids = '__ALL__', norm_by = 'src'

    def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'):
        args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:770: TypeError
____________________ test_edge_softmax[idtype1-shp2-dst-g0] ____________________

g = Graph(num_nodes=3, num_edges=9, ndata_schemes={} edata_schemes={})
norm_by = 'dst', shp = (3, 4, 5), idtype = torch.int64

    @pytest.mark.parametrize('g', get_cases(['clique']))
    @pytest.mark.parametrize('norm_by', ['src', 'dst'])
    @pytest.mark.parametrize('shp', edge_softmax_shapes)
    @parametrize_idtype
    def test_edge_softmax(g, norm_by, shp, idtype):
        g = g.astype(idtype).to(F.ctx())
        edata = F.tensor(np.random.rand(g.number_of_edges(), *shp))
        e1 = F.attach_grad(F.clone(edata))
        with F.record_grad():
>           score1 = edge_softmax(g, e1, norm_by=norm_by)

tests/compute/test_sparse.py:240:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/edge_softmax.py:135: in edge_softmax
    eids=eids, norm_by=norm_by)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
logits = tensor([[[[6.6720e-01, 7.0214e-01, 7.0333e-01, 1.2660e-01, 6.5713e-03], [1.4280e-01, 7.9931e-01, 7.0660e-01,... [7.5633e-01, 4.3664e-01, 5.7141e-01, 4.3226e-01, 2.6665e-01]]]], dtype=torch.float64, requires_grad=True)
eids = '__ALL__', norm_by = 'dst'

    def edge_softmax(gidx, logits, eids=ALL, norm_by='dst'):
        args = _cast_if_autocast_enabled(gidx, logits, eids, norm_by)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:770: TypeError
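For orientation while reading the edge_softmax cases: the op normalizes each edge score over the edges that share one endpoint, the destination node for norm_by='dst' (the default) or the source node for norm_by='src'. A rough pure-PyTorch equivalent for the 'dst' grouping, with made-up names and without the per-group max subtraction a production implementation would add for numerical stability:

    import torch

    def edge_softmax_by_dst(dst, scores, num_nodes):
        # dst[i] is the destination node id (int64) of edge i;
        # scores has shape (num_edges, *feature_shape).
        exp = scores.exp()
        denom = torch.zeros((num_nodes,) + scores.shape[1:], dtype=scores.dtype)
        denom.index_add_(0, dst, exp)  # sum of exp within each dst group
        return exp / denom[dst]        # normalize every edge by its group sum

On the 3-node, 9-edge clique used above, every node has three incoming edges, so each group of three normalized scores sums to one.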
___________________________ test_segment_reduce[sum] ___________________________

reducer = 'sum'

    @pytest.mark.parametrize('reducer', ['sum', 'max', 'min', 'mean'])
    def test_segment_reduce(reducer):
        ctx = F.ctx()
        value = F.tensor(np.random.rand(10, 5))
        v1 = F.attach_grad(F.clone(value))
        v2 = F.attach_grad(F.clone(value))
        seglen = F.tensor([2, 3, 0, 4, 1, 0, 0])
        u = F.copy_to(F.arange(0, F.shape(value)[0], F.int32), ctx)
        v = F.repeat(F.copy_to(F.arange(0, len(seglen), F.int32), ctx), seglen, dim=0)
        num_nodes = {'_U': len(u), '_V': len(seglen)}
        g = dgl.convert.heterograph({('_U', '_E', '_V'): (u, v)}, num_nodes_dict=num_nodes)
        with F.record_grad():
>           rst1 = gspmm(g, 'copy_lhs', reducer, v1, None)

tests/compute/test_sparse.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[0.5684, 0.6808, 0.9339, 0.2656, 0.1624], [0.2807, 0.1330, 0.1766, 0.5875, 0.8666], [0.3529, 0...17, 0.7085, 0.0792], [0.4422, 0.3290, 0.8093, 0.7278, 0.1863]], dtype=torch.float64, requires_grad=True)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
___________________________ test_segment_reduce[max] ___________________________

reducer = 'max'

    @pytest.mark.parametrize('reducer', ['sum', 'max', 'min', 'mean'])
    def test_segment_reduce(reducer):
        ctx = F.ctx()
        value = F.tensor(np.random.rand(10, 5))
        v1 = F.attach_grad(F.clone(value))
        v2 = F.attach_grad(F.clone(value))
        seglen = F.tensor([2, 3, 0, 4, 1, 0, 0])
        u = F.copy_to(F.arange(0, F.shape(value)[0], F.int32), ctx)
        v = F.repeat(F.copy_to(F.arange(0, len(seglen), F.int32), ctx), seglen, dim=0)
        num_nodes = {'_U': len(u), '_V': len(seglen)}
        g = dgl.convert.heterograph({('_U', '_E', '_V'): (u, v)}, num_nodes_dict=num_nodes)
        with F.record_grad():
>           rst1 = gspmm(g, 'copy_lhs', reducer, v1, None)

tests/compute/test_sparse.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'max'
lhs_data = tensor([[0.3728, 0.7506, 0.0820, 0.9710, 0.0291], [0.6357, 0.9240, 0.7497, 0.3817, 0.5144], [0.1001, 0...81, 0.4114, 0.6433], [0.5135, 0.2694, 0.9685, 0.4559, 0.6366]], dtype=torch.float64, requires_grad=True)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
___________________________ test_segment_reduce[min] ___________________________

reducer = 'min'

    @pytest.mark.parametrize('reducer', ['sum', 'max', 'min', 'mean'])
    def test_segment_reduce(reducer):
        ctx = F.ctx()
        value = F.tensor(np.random.rand(10, 5))
        v1 = F.attach_grad(F.clone(value))
        v2 = F.attach_grad(F.clone(value))
        seglen = F.tensor([2, 3, 0, 4, 1, 0, 0])
        u = F.copy_to(F.arange(0, F.shape(value)[0], F.int32), ctx)
        v = F.repeat(F.copy_to(F.arange(0, len(seglen), F.int32), ctx), seglen, dim=0)
        num_nodes = {'_U': len(u), '_V': len(seglen)}
        g = dgl.convert.heterograph({('_U', '_E', '_V'): (u, v)}, num_nodes_dict=num_nodes)
        with F.record_grad():
>           rst1 = gspmm(g, 'copy_lhs', reducer, v1, None)

tests/compute/test_sparse.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'min'
lhs_data = tensor([[0.4160, 0.4735, 0.8111, 0.7301, 0.0301], [0.7698, 0.4966, 0.0686, 0.6620, 0.5250], [0.6728, 0...60, 0.4410, 0.1940], [0.3650, 0.2974, 0.4912, 0.9652, 0.7168]], dtype=torch.float64, requires_grad=True)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
__________________________ test_segment_reduce[mean] ___________________________

reducer = 'mean'

    @pytest.mark.parametrize('reducer', ['sum', 'max', 'min', 'mean'])
    def test_segment_reduce(reducer):
        ctx = F.ctx()
        value = F.tensor(np.random.rand(10, 5))
        v1 = F.attach_grad(F.clone(value))
        v2 = F.attach_grad(F.clone(value))
        seglen = F.tensor([2, 3, 0, 4, 1, 0, 0])
        u = F.copy_to(F.arange(0, F.shape(value)[0], F.int32), ctx)
        v = F.repeat(F.copy_to(F.arange(0, len(seglen), F.int32), ctx), seglen, dim=0)
        num_nodes = {'_U': len(u), '_V': len(seglen)}
        g = dgl.convert.heterograph({('_U', '_E', '_V'): (u, v)}, num_nodes_dict=num_nodes)
        with F.record_grad():
>           rst1 = gspmm(g, 'copy_lhs', reducer, v1, None)

tests/compute/test_sparse.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[0.1120, 0.9807, 0.7658, 0.3284, 0.4239], [0.3536, 0.1679, 0.5856, 0.1316, 0.8034], [0.9506, 0...98, 0.4188, 0.7377], [0.5520, 0.7393, 0.2410, 0.9761, 0.4062]], dtype=torch.float64, requires_grad=True)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
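The segment_reduce tests above emulate a segmented reduction with a bipartite graph: seglen = [2, 3, 0, 4, 1, 0, 0] assigns the ten rows of `value` to seven segments, and gspmm(g, 'copy_lhs', reducer, v1, None) reduces each segment (the captured reduce_op = 'sum' under the 'mean' case suggests mean is lowered to a sum with a later division). For the 'sum' reducer the expected result can be sketched in plain PyTorch; all names here are illustrative:

    import torch

    value = torch.rand(10, 5)
    seglen = torch.tensor([2, 3, 0, 4, 1, 0, 0])

    # Segment id per row: [0, 0, 1, 1, 1, 3, 3, 3, 3, 4], mirroring the
    # F.repeat(...) construction in the test above.
    seg_id = torch.repeat_interleave(torch.arange(len(seglen)), seglen)

    # Segment sums; empty segments (ids 2, 5, 6) remain all-zero rows.
    out = torch.zeros(len(seglen), 5).index_add_(0, seg_id, value)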
___________________ test_segment_mm[dtype2-0.003-1-idtype0] ____________________

idtype = torch.int32, feat_size = 1, dtype = torch.float32, tol = 0.003

    @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
    @parametrize_idtype
    @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256])
    @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)])
    def test_segment_mm(idtype, feat_size, dtype, tol):
        if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16):
            pytest.skip("Only support float32 and float64 on CPU.")
>       if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \
            and dtype == torch.bfloat16:

tests/compute/test_sparse.py:299:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__
    c = self._cmp(other)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb704909048>
other = LooseVersion ('11.0')

    def _cmp (self, other):
        if isinstance(other, str):
            other = LooseVersion(other)
        elif not isinstance(other, LooseVersion):
            return NotImplemented
>       if self.version == other.version:
E       AttributeError: 'LooseVersion' object has no attribute 'version'

/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError
___________________ test_segment_mm[dtype2-0.003-1-idtype1] ____________________

idtype = torch.int64, feat_size = 1, dtype = torch.float32, tol = 0.003

    @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
    @parametrize_idtype
    @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256])
    @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)])
    def test_segment_mm(idtype, feat_size, dtype, tol):
        if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16):
            pytest.skip("Only support float32 and float64 on CPU.")
>       if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \
            and dtype == torch.bfloat16:

tests/compute/test_sparse.py:299:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__
    c = self._cmp(other)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb457b40a20>
other = LooseVersion ('11.0')

    def _cmp (self, other):
        if isinstance(other, str):
            other = LooseVersion(other)
        elif not isinstance(other, LooseVersion):
            return NotImplemented
>       if self.version == other.version:
E       AttributeError: 'LooseVersion' object has no attribute 'version'

/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError
___________________ test_segment_mm[dtype2-0.003-8-idtype0] ____________________

idtype = torch.int32, feat_size = 8, dtype = torch.float32, tol = 0.003
@unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb43e431278> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ___________________ test_segment_mm[dtype2-0.003-8-idtype1] ____________________ idtype = torch.int64, feat_size = 8, dtype = torch.float32, tol = 0.003 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb43e6664e0> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ___________________ test_segment_mm[dtype2-0.003-16-idtype0] ___________________ idtype = torch.int32, feat_size = 16, dtype = torch.float32, tol = 0.003 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and 
dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb72ad11748> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ___________________ test_segment_mm[dtype2-0.003-16-idtype1] ___________________ idtype = torch.int64, feat_size = 16, dtype = torch.float32, tol = 0.003 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb6152cd6a0> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ___________________ test_segment_mm[dtype2-0.003-64-idtype0] ___________________ idtype = torch.int32, feat_size = 64, dtype = torch.float32, tol = 0.003 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in 
__lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb72b848fd0> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ___________________ test_segment_mm[dtype2-0.003-64-idtype1] ___________________ idtype = torch.int64, feat_size = 64, dtype = torch.float32, tol = 0.003 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb7060e8ba8> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError __________________ test_segment_mm[dtype2-0.003-256-idtype0] ___________________ idtype = torch.int32, feat_size = 256, dtype = torch.float32, tol = 0.003 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb43e7eaba8> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return 
NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError __________________ test_segment_mm[dtype2-0.003-256-idtype1] ___________________ idtype = torch.int64, feat_size = 256, dtype = torch.float32, tol = 0.003 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb72b90a8d0> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ___________________ test_segment_mm[dtype3-0.0001-1-idtype0] ___________________ idtype = torch.int32, feat_size = 1, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb43e683438> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ___________________ test_segment_mm[dtype3-0.0001-1-idtype1] ___________________ idtype = torch.int64, feat_size = 1, dtype = torch.float64, tol = 0.0001 
@unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb7297275f8> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ___________________ test_segment_mm[dtype3-0.0001-8-idtype0] ___________________ idtype = torch.int32, feat_size = 8, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb61fa534e0> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ___________________ test_segment_mm[dtype3-0.0001-8-idtype1] ___________________ idtype = torch.int64, feat_size = 8, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and 
dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb706123e48> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError __________________ test_segment_mm[dtype3-0.0001-16-idtype0] ___________________ idtype = torch.int32, feat_size = 16, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb43e7ea588> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError __________________ test_segment_mm[dtype3-0.0001-16-idtype1] ___________________ idtype = torch.int64, feat_size = 16, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in 
__lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb72a9bfbe0> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError __________________ test_segment_mm[dtype3-0.0001-64-idtype0] ___________________ idtype = torch.int32, feat_size = 64, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb6146d2780> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError __________________ test_segment_mm[dtype3-0.0001-64-idtype1] ___________________ idtype = torch.int64, feat_size = 64, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @parametrize_idtype @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_segment_mm(idtype, feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:299: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb43eaf64a8> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return 
NotImplemented
>       if self.version == other.version:
E       AttributeError: 'LooseVersion' object has no attribute 'version'

/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError
__________________ test_segment_mm[dtype3-0.0001-256-idtype0] __________________

idtype = torch.int32, feat_size = 256, dtype = torch.float64, tol = 0.0001

    @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
    @parametrize_idtype
    @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256])
    @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)])
    def test_segment_mm(idtype, feat_size, dtype, tol):
        if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16):
            pytest.skip("Only support float32 and float64 on CPU.")
>       if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \
            and dtype == torch.bfloat16:

tests/compute/test_sparse.py:299:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__
    c = self._cmp(other)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb72b7fccc0>
other = LooseVersion ('11.0')

    def _cmp (self, other):
        if isinstance(other, str):
            other = LooseVersion(other)
        elif not isinstance(other, LooseVersion):
            return NotImplemented
>       if self.version == other.version:
E       AttributeError: 'LooseVersion' object has no attribute 'version'

/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError
__________________ test_segment_mm[dtype3-0.0001-256-idtype1] __________________

idtype = torch.int64, feat_size = 256, dtype = torch.float64, tol = 0.0001

    @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
    @parametrize_idtype
    @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256])
    @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 1e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)])
    def test_segment_mm(idtype, feat_size, dtype, tol):
        if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16):
            pytest.skip("Only support float32 and float64 on CPU.")
>       if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \
            and dtype == torch.bfloat16:

tests/compute/test_sparse.py:299:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__
    c = self._cmp(other)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb705d057b8>
other = LooseVersion ('11.0')

    def _cmp (self, other):
        if isinstance(other, str):
            other = LooseVersion(other)
        elif not isinstance(other, LooseVersion):
            return NotImplemented
>       if self.version == other.version:
E       AttributeError: 'LooseVersion' object has no attribute 'version'

/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError
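All of the test_segment_mm failures above (and the test_gather_mm_idx_b failures below) break before the test body runs: on a CPU-only worker torch.version.cuda is None, and LooseVersion(None) never parses anything, so the object has neither `vstring` (hence the mangled repr) nor `version`, and the `<` comparison at tests/compute/test_sparse.py:299 raises AttributeError inside setuptools' vendored distutils. Because Python evaluates the version comparison before the `and dtype == torch.bfloat16` clause, even the float32 and float64 parametrizations die on it. A guarded comparison, sketched with the maintained packaging library (the helper name is invented here, not necessarily the PR's actual fix):

    import torch
    from packaging import version

    def cuda_older_than(minimum):
        # torch.version.cuda is None on CPU-only builds; treat that as
        # "not older than", so we never hand None to a version parser.
        cuda = torch.version.cuda
        return cuda is not None and version.parse(cuda) < version.parse(minimum)

Reordering the test's condition to check `dtype == torch.bfloat16` first, or guarding on torch.version.cuda being None, would avoid the crash just as well.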
_____________________ test_gather_mm_idx_b[dtype2-0.003-1] _____________________

feat_size = 1, dtype = torch.float32, tol = 0.003

    @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
    @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256])
    @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 2e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)])
    def test_gather_mm_idx_b(feat_size, dtype, tol):
        if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16):
            pytest.skip("Only support float32 and float64 on CPU.")
>       if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \
            and dtype == torch.bfloat16:

tests/compute/test_sparse.py:339:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__
    c = self._cmp(other)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb7291ef4e0>
other = LooseVersion ('11.0')

    def _cmp (self, other):
        if isinstance(other, str):
            other = LooseVersion(other)
        elif not isinstance(other, LooseVersion):
            return NotImplemented
>       if self.version == other.version:
E       AttributeError: 'LooseVersion' object has no attribute 'version'

/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError
_____________________ test_gather_mm_idx_b[dtype2-0.003-8] _____________________

feat_size = 8, dtype = torch.float32, tol = 0.003

    @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
    @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256])
    @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 2e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)])
    def test_gather_mm_idx_b(feat_size, dtype, tol):
        if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16):
            pytest.skip("Only support float32 and float64 on CPU.")
>       if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \
            and dtype == torch.bfloat16:

tests/compute/test_sparse.py:339:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__
    c = self._cmp(other)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb43e422f98>
other = LooseVersion ('11.0')

    def _cmp (self, other):
        if isinstance(other, str):
            other = LooseVersion(other)
        elif not isinstance(other, LooseVersion):
            return NotImplemented
>       if self.version == other.version:
E       AttributeError: 'LooseVersion' object has no attribute 'version'

/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError
____________________ test_gather_mm_idx_b[dtype2-0.003-16] _____________________

feat_size = 16, dtype = torch.float32, tol = 0.003

    @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
    @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256])
    @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 2e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)])
    def test_gather_mm_idx_b(feat_size, dtype, tol):
        if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16):
            pytest.skip("Only support float32 and float64 on CPU.")
>       if
LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:339: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb7062b07f0> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ____________________ test_gather_mm_idx_b[dtype2-0.003-64] _____________________ feat_size = 64, dtype = torch.float32, tol = 0.003 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 2e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_gather_mm_idx_b(feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:339: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb72aba1e10> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ____________________ test_gather_mm_idx_b[dtype2-0.003-256] ____________________ feat_size = 256, dtype = torch.float32, tol = 0.003 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 2e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_gather_mm_idx_b(feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:339: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in 
repr()] LooseVersion object at 0x7fb7064f00f0> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ____________________ test_gather_mm_idx_b[dtype3-0.0001-1] _____________________ feat_size = 1, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 2e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_gather_mm_idx_b(feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:339: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb43e9cb668> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ____________________ test_gather_mm_idx_b[dtype3-0.0001-8] _____________________ feat_size = 8, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 2e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_gather_mm_idx_b(feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:339: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb705fba128> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ____________________ test_gather_mm_idx_b[dtype3-0.0001-16] 
____________________ feat_size = 16, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 2e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_gather_mm_idx_b(feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:339: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb7061235f8> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ____________________ test_gather_mm_idx_b[dtype3-0.0001-64] ____________________ feat_size = 64, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 2e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_gather_mm_idx_b(feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:339: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb729703748> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ___________________ test_gather_mm_idx_b[dtype3-0.0001-256] ____________________ feat_size = 256, dtype = torch.float64, tol = 0.0001 @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @pytest.mark.parametrize('feat_size', [1, 8, 16, 64, 256]) @pytest.mark.parametrize('dtype, tol', [(torch.float16, 1e-2), (torch.bfloat16, 2e-2), (torch.float32, 3e-3), (torch.float64, 1e-4)]) def test_gather_mm_idx_b(feat_size, dtype, tol): if F._default_context_str == 'cpu' and dtype in (torch.float16, 
torch.bfloat16): pytest.skip("Only support float32 and float64 on CPU.") > if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ and dtype == torch.bfloat16: tests/compute/test_sparse.py:339: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:72: in __lt__ c = self._cmp(other) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <[AttributeError("'LooseVersion' object has no attribute 'vstring'") raised in repr()] LooseVersion object at 0x7fb72baf5550> other = LooseVersion ('11.0') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) elif not isinstance(other, LooseVersion): return NotImplemented > if self.version == other.version: E AttributeError: 'LooseVersion' object has no attribute 'version' /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:355: AttributeError ___________________________ test_use_libxsmm_switch ____________________________ @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now') @unittest.skipIf(F._default_context_str == 'gpu', reason="Libxsmm only fit in CPU.") def test_use_libxsmm_switch(): import torch g = dgl.graph(([0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2])) x = torch.ones(3, 2, requires_grad=True) y = torch.arange(1, 13).float().view(6, 2).requires_grad_() assert dgl.is_libxsmm_enabled() > dgl.ops.u_mul_e_sum(g, x, y) tests/compute/test_sparse.py:407: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/ops/spmm.py:147: in func return gspmm(g, binary_op, reduce_op, x, y) python/dgl/ops/spmm.py:77: in gspmm lhs_data, rhs_data) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ gidx = op = 'mul', reduce_op = 'sum' lhs_data = tensor([[1., 1.], [1., 1.], [1., 1.]], requires_grad=True) rhs_data = tensor([[ 1., 2.], [ 3., 4.], [ 5., 6.], [ 7., 8.], [ 9., 10.], [11., 12.]], requires_grad=True) def gspmm(gidx, op, reduce_op, lhs_data, rhs_data): if op == 'sub': op = 'add' rhs_data = -rhs_data if op == 'div': op = 'mul' rhs_data = 1. 
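Editorial note: all twelve failures above share one root cause. This worker runs a CPU-only PyTorch build, so torch.version.cuda is None; distutils' LooseVersion only parses a truthy vstring, so LooseVersion(None) is constructed without a 'version' attribute (and without 'vstring', which is why even its repr() raises), and the later '<' comparison blows up inside _cmp. A minimal sketch of a guard, assuming a hypothetical helper name cuda_older_than that is not part of DGL's API:

    from distutils.version import LooseVersion

    import torch

    def cuda_older_than(version_str):
        # torch.version.cuda is None on CPU-only builds; treat that as
        # "no old CUDA" rather than constructing LooseVersion(None).
        cuda_version = torch.version.cuda
        if cuda_version is None:
            return False
        return LooseVersion(cuda_version) < LooseVersion(version_str)

With such a guard, the bfloat16 skip in these tests could be evaluated safely on CPU-only workers.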
___________________________ test_use_libxsmm_switch ____________________________

    @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
    @unittest.skipIf(F._default_context_str == 'gpu', reason="Libxsmm only fit in CPU.")
    def test_use_libxsmm_switch():
        import torch
        g = dgl.graph(([0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]))
        x = torch.ones(3, 2, requires_grad=True)
        y = torch.arange(1, 13).float().view(6, 2).requires_grad_()
        assert dgl.is_libxsmm_enabled()
>       dgl.ops.u_mul_e_sum(g, x, y)

tests/compute/test_sparse.py:407:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/ops/spmm.py:147: in func
    return gspmm(g, binary_op, reduce_op, x, y)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[1., 1.], [1., 1.], [1., 1.]], requires_grad=True)
rhs_data = tensor([[ 1.,  2.], [ 3.,  4.], [ 5.,  6.], [ 7.,  8.], [ 9., 10.], [11., 12.]], requires_grad=True)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
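Editorial note: this TypeError repeats through the rest of the run. The call site is 'with autocast(enabled=False):' in python/dgl/backend/pytorch/sparse.py, and the message shows that on this PyTorch build the name autocast is bound to a no-op fallback called empty_context that takes no keyword arguments. A minimal sketch of a fallback that tolerates the call site, assuming (as the traceback suggests) that DGL aliases autocast to such a helper when the real torch autocast is unavailable:

    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):
        # Accept and ignore autocast-style arguments such as enabled=False,
        # then do nothing; the body runs without any autocasting.
        yield

Accepting *args/**kwargs keeps every existing 'with autocast(...)' call site working on builds that fall back to the no-op context.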
_________________________ test_v2v_update_all[idtype0] _________________________

idtype = torch.int32

    @parametrize_idtype
    def test_v2v_update_all(idtype):
        def _test(fld):
            def message_func(edges):
                return {'m' : edges.src[fld]}
            def message_func_edge(edges):
                if len(edges.src[fld].shape) == 1:
                    return {'m' : edges.src[fld] * edges.data['e1']}
                else:
                    return {'m' : edges.src[fld] * edges.data['e2']}
            def reduce_func(nodes):
                return {fld : F.sum(nodes.mailbox['m'], 1)}
            def apply_func(nodes):
                return {fld : 2 * nodes.data[fld]}
            g = generate_graph(idtype)
            # update all
            v1 = g.ndata[fld]
            g.update_all(fn.copy_src(src=fld, out='m'),
                         fn.sum(msg='m', out=fld), apply_func)
            v2 = g.ndata[fld]
            g.ndata.update({fld : v1})
            g.update_all(message_func, reduce_func, apply_func)
            v3 = g.ndata[fld]
            assert F.allclose(v2, v3)
            # update all with edge weights
            v1 = g.ndata[fld]
            g.update_all(fn.src_mul_edge(src=fld, edge='e1', out='m'),
                         fn.sum(msg='m', out=fld), apply_func)
            v2 = g.ndata[fld]
            g.ndata.update({fld : v1})
            g.update_all(message_func_edge, reduce_func, apply_func)
            v4 = g.ndata[fld]
            assert F.allclose(v2, v4)
        # test 1d node features
>       _test('f1')

tests/compute/test_specialization.py:61:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_specialization.py:45: in _test
    g.update_all(fn.copy_src(src=fld, out='m'), fn.sum(msg='m', out=fld), apply_func)
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([ 1.7772, -0.6740, -0.8050,  0.1309, -0.4080, -1.8522, -0.3026,  0.1197, -1.4796,  0.8724])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
_________________________ test_v2v_update_all[idtype1] _________________________

idtype = torch.int64

    (test body and traceback identical to test_v2v_update_all[idtype0] above)
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([-2.1621,  0.5268, -0.1093, -0.0476,  0.5928, -0.7815,  0.8331, -0.5041, -0.0937, -0.1564])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
____________________________ test_v2v_snr[idtype0] _____________________________

idtype = torch.int32

    @parametrize_idtype
    def test_v2v_snr(idtype):
        u = F.tensor([0, 0, 0, 3, 4, 9], idtype)
        v = F.tensor([1, 2, 3, 9, 9, 0], idtype)
        def _test(fld):
            def message_func(edges):
                return {'m' : edges.src[fld]}
            def message_func_edge(edges):
                if len(edges.src[fld].shape) == 1:
                    return {'m' : edges.src[fld] * edges.data['e1']}
                else:
                    return {'m' : edges.src[fld] * edges.data['e2']}
            def reduce_func(nodes):
                return {fld : F.sum(nodes.mailbox['m'], 1)}
            def apply_func(nodes):
                return {fld : 2 * nodes.data[fld]}
            g = generate_graph(idtype)
            # send and recv
            v1 = g.ndata[fld]
            g.send_and_recv((u, v), fn.copy_src(src=fld, out='m'),
                            fn.sum(msg='m', out=fld), apply_func)
            v2 = g.ndata[fld]
            g.ndata.update({fld : v1})
            g.send_and_recv((u, v), message_func, reduce_func, apply_func)
            v3 = g.ndata[fld]
            assert F.allclose(v2, v3)
            # send and recv with edge weights
            v1 = g.ndata[fld]
            g.send_and_recv((u, v), fn.src_mul_edge(src=fld, edge='e1', out='m'),
                            fn.sum(msg='m', out=fld), apply_func)
            v2 = g.ndata[fld]
            g.ndata.update({fld : v1})
            g.send_and_recv((u, v), message_func_edge, reduce_func, apply_func)
            v4 = g.ndata[fld]
            assert F.allclose(v2, v4)
        # test 1d node features
>       _test('f1')

tests/compute/test_specialization.py:104:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_specialization.py:88: in _test
    fn.sum(msg='m', out=fld), apply_func)
python/dgl/heterograph.py:4607: in send_and_recv
    compute_graph, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([-0.9186, -1.2810,  0.3374,  1.1025]), rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
____________________________ test_v2v_snr[idtype1] _____________________________

idtype = torch.int64

    (test body and traceback identical to test_v2v_snr[idtype0] above)
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([-0.8413,  0.4031,  2.2376,  0.3881]), rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
____________________________ test_v2v_pull[idtype0] ____________________________

idtype = torch.int32

    @parametrize_idtype
    def test_v2v_pull(idtype):
        nodes = F.tensor([1, 2, 3, 9], idtype)
        def _test(fld):
            def message_func(edges):
                return {'m' : edges.src[fld]}
            def message_func_edge(edges):
                if len(edges.src[fld].shape) == 1:
                    return {'m' : edges.src[fld] * edges.data['e1']}
                else:
                    return {'m' : edges.src[fld] * edges.data['e2']}
            def reduce_func(nodes):
                return {fld : F.sum(nodes.mailbox['m'], 1)}
            def apply_func(nodes):
                return {fld : 2 * nodes.data[fld]}
            g = generate_graph(idtype)
            # send and recv
            v1 = g.ndata[fld]
            g.pull(nodes, fn.copy_src(src=fld, out='m'), fn.sum(msg='m', out=fld), apply_func)
            v2 = g.ndata[fld]
            g.ndata[fld] = v1
            g.pull(nodes, message_func, reduce_func, apply_func)
            v3 = g.ndata[fld]
            assert F.allclose(v2, v3)
            # send and recv with edge weights
            v1 = g.ndata[fld]
            g.pull(nodes, fn.src_mul_edge(src=fld, edge='e1', out='m'),
                   fn.sum(msg='m', out=fld), apply_func)
            v2 = g.ndata[fld]
            g.ndata[fld] = v1
            g.pull(nodes, message_func_edge, reduce_func, apply_func)
            v4 = g.ndata[fld]
            assert F.allclose(v2, v4)
        # test 1d node features
>       _test('f1')

tests/compute/test_specialization.py:146:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_specialization.py:130: in _test
    g.pull(nodes, fn.copy_src(src=fld, out='m'), fn.sum(msg='m', out=fld), apply_func)
python/dgl/heterograph.py:4711: in pull
    compute_graph, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([ 0.7355, -0.4884,  0.4938, -0.2935, -0.2445,  1.1038, -2.9475, -0.7513,  0.4680])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
____________________________ test_v2v_pull[idtype1] ____________________________

idtype = torch.int64

    (test body and traceback identical to test_v2v_pull[idtype0] above)
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([ 0.9869, -0.2625, -0.7306, -0.1077,  1.2263,  1.3180, -0.8481,  0.3126, -0.8374])
rhs_data = None
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
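Editorial note: test_v2v_update_all, test_v2v_snr and test_v2v_pull all die on the same lowered call. The frames show update_all/send_and_recv/pull reducing fn.copy_src + fn.sum to gspmm(g, 'copy_lhs', 'sum', x, None) before the broken autocast fallback is entered. For reference, the copy_lhs/sum combination just sums each destination node's incoming source features; a dense sketch with made-up edges (not DGL internals):

    import torch

    src = torch.tensor([0, 0, 1])         # edges 0->1, 0->2, 1->2
    dst = torch.tensor([1, 2, 2])
    x = torch.tensor([[1.], [2.], [4.]])  # one feature row per node

    out = torch.zeros_like(x)
    out.index_add_(0, dst, x[src])        # out[1] = x[0]; out[2] = x[0] + x[1]
    print(out)                            # tensor([[0.], [1.], [3.]])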
___________________ test_update_all_multi_fallback[idtype0] ____________________

idtype = torch.int32

    @parametrize_idtype
    def test_update_all_multi_fallback(idtype):
        # create a graph with zero in degree nodes
        g = dgl.DGLGraph()
        g = g.astype(idtype).to(F.ctx())
        g.add_nodes(10)
        for i in range(1, 9):
            g.add_edge(0, i)
            g.add_edge(i, 9)
        g.ndata['h'] = F.randn((10, D))
        g.edata['w1'] = F.randn((16,))
        g.edata['w2'] = F.randn((16, D))
        def _mfunc_hxw1(edges):
            return {'m1' : edges.src['h'] * F.unsqueeze(edges.data['w1'], 1)}
        def _mfunc_hxw2(edges):
            return {'m2' : edges.src['h'] * edges.data['w2']}
        def _rfunc_m1(nodes):
            return {'o1' : F.sum(nodes.mailbox['m1'], 1)}
        def _rfunc_m2(nodes):
            return {'o2' : F.sum(nodes.mailbox['m2'], 1)}
        def _rfunc_m1max(nodes):
            return {'o3' : F.max(nodes.mailbox['m1'], 1)}
        def _afunc(nodes):
            ret = {}
            for k, v in nodes.data.items():
                if k.startswith('o'):
                    ret[k] = 2 * v
            return ret
        # compute ground truth
        g.update_all(_mfunc_hxw1, _rfunc_m1, _afunc)
        o1 = g.ndata.pop('o1')
        g.update_all(_mfunc_hxw2, _rfunc_m2, _afunc)
        o2 = g.ndata.pop('o2')
        g.update_all(_mfunc_hxw1, _rfunc_m1max, _afunc)
        o3 = g.ndata.pop('o3')
        # v2v spmv
        g.update_all(fn.src_mul_edge(src='h', edge='w1', out='m1'),
                     fn.sum(msg='m1', out='o1'),
>                    _afunc)

tests/compute/test_specialization.py:188:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:323: in invoke_gspmm
    z = op(graph, x, y)
python/dgl/ops/spmm.py:147: in func
    return gspmm(g, binary_op, reduce_op, x, y)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[ 1.2555, -0.5233, -0.6529, -0.9409, -0.7616], [ 0.8602, -0.1180, -0.1393, -0.6949,  1.6017], ...0.2302], [ 0.4938,  0.6535, -0.3516, -0.2553,  0.4336], [-2.0572,  0.5819, -0.4576,  0.9890,  1.5942]])
rhs_data = tensor([[-1.4983], [-1.2475], [-0.6912], [ 0.6846], [ 0.2962], [-0.4096], ...92], [-0.4065], [-0.1517], [ 0.2591], [ 0.9079], [ 0.3428], [ 0.5044]])

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
___________________ test_update_all_multi_fallback[idtype1] ____________________

idtype = torch.int64

    (test body and traceback identical to test_update_all_multi_fallback[idtype0] above)
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[-1.2040, -1.8783,  1.2041, -1.0065, -0.0673], [ 0.0639, -1.7493,  0.0023, -0.1434, -0.2575], ...0.6172], [-1.1544,  1.9917, -1.1734,  0.8495, -0.3927], [ 0.0521,  0.0979,  0.4685, -1.2702,  1.0220]])
rhs_data = tensor([[ 1.4022], [-0.5181], [-0.6281], [-1.5431], [-0.3130], [ 2.7795], ...09], [ 1.6756], [ 0.0592], [-0.7749], [-0.1535], [ 0.4783], [-0.4258]])
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
______________________ test_pull_multi_fallback[idtype0] _______________________

idtype = torch.int32

    @parametrize_idtype
    def test_pull_multi_fallback(idtype):
        # create a graph with zero in degree nodes
        g = dgl.DGLGraph()
        g = g.astype(idtype).to(F.ctx())
        g.add_nodes(10)
        for i in range(1, 9):
            g.add_edge(0, i)
            g.add_edge(i, 9)
        g.ndata['h'] = F.randn((10, D))
        g.edata['w1'] = F.randn((16,))
        g.edata['w2'] = F.randn((16, D))
        def _mfunc_hxw1(edges):
            return {'m1' : edges.src['h'] * F.unsqueeze(edges.data['w1'], 1)}
        def _mfunc_hxw2(edges):
            return {'m2' : edges.src['h'] * edges.data['w2']}
        def _rfunc_m1(nodes):
            return {'o1' : F.sum(nodes.mailbox['m1'], 1)}
        def _rfunc_m2(nodes):
            return {'o2' : F.sum(nodes.mailbox['m2'], 1)}
        def _rfunc_m1max(nodes):
            return {'o3' : F.max(nodes.mailbox['m1'], 1)}
        def _afunc(nodes):
            ret = {}
            for k, v in nodes.data.items():
                if k.startswith('o'):
                    ret[k] = 2 * v
            return ret
        # nodes to pull
        def _pull_nodes(nodes):
            # compute ground truth
            g.pull(nodes, _mfunc_hxw1, _rfunc_m1, _afunc)
            o1 = g.ndata.pop('o1')
            g.pull(nodes, _mfunc_hxw2, _rfunc_m2, _afunc)
            o2 = g.ndata.pop('o2')
            g.pull(nodes, _mfunc_hxw1, _rfunc_m1max, _afunc)
            o3 = g.ndata.pop('o3')
            # v2v spmv
            g.pull(nodes, fn.src_mul_edge(src='h', edge='w1', out='m1'),
                   fn.sum(msg='m1', out='o1'), _afunc)
            assert F.allclose(o1, g.ndata.pop('o1'))
            # v2v fallback to e2v
            g.pull(nodes, fn.src_mul_edge(src='h', edge='w2', out='m2'),
                   fn.sum(msg='m2', out='o2'), _afunc)
            assert F.allclose(o2, g.ndata.pop('o2'))
        # test#1: non-0deg nodes
        nodes = [1, 2, 9]
>       _pull_nodes(nodes)

tests/compute/test_specialization.py:245:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_specialization.py:236: in _pull_nodes
    _afunc)
python/dgl/heterograph.py:4711: in pull
    compute_graph, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:323: in invoke_gspmm
    z = op(graph, x, y)
python/dgl/ops/spmm.py:147: in func
    return gspmm(g, binary_op, reduce_op, x, y)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[-0.4455,  0.0341, -0.4392, -1.0152,  0.2764], [-0.2255, -1.1024,  0.8154,  1.9210,  0.4685], ...1.3489], [-0.8711, -0.2574,  0.3529, -0.6929, -0.1747], [-0.0171, -0.0439, -0.5086, -1.2868, -1.2972]])
rhs_data = tensor([[-0.4035], [ 0.0707], [-0.5416], [-0.2029], [-0.3885], [ 0.5734], [ 0.5644], [-0.1903], [-0.3218], [ 1.1178]])

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
______________________ test_pull_multi_fallback[idtype1] _______________________

idtype = torch.int64

    (test body and traceback identical to test_pull_multi_fallback[idtype0] above)
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[ 0.0952,  0.6272, -0.6850, -0.2446, -0.5820], [ 0.0647, -0.5618, -0.1020,  1.3919, -0.5272], ...0.3953], [-0.0307,  1.1244, -0.8526,  0.3079,  0.8971], [ 0.1836,  2.9280, -1.3431,  0.3906,  0.9234]])
rhs_data = tensor([[-1.0083], [ 2.0015], [-0.4615], [ 0.0321], [ 0.2798], [ 0.1885], [-0.6768], [-1.3942], [-0.8055], [-0.9279]])
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
__________________________ test_spmv_3d_feat[idtype0] __________________________

idtype = torch.int32

    @parametrize_idtype
    def test_spmv_3d_feat(idtype):
        def src_mul_edge_udf(edges):
            return {'sum': edges.src['h'] * F.unsqueeze(F.unsqueeze(edges.data['h'], 1), 1)}
        def sum_udf(nodes):
            return {'h': F.sum(nodes.mailbox['sum'], 1)}
        n = 100
        p = 0.1
        a = sp.random(n, n, p, data_rvs=lambda n: np.ones(n))
        g = dgl.DGLGraph(a)
        g = g.astype(idtype).to(F.ctx())
        m = g.number_of_edges()
        # test#1: v2v with adj data
        h = F.randn((n, 5, 5))
        e = F.randn((m,))
        g.ndata['h'] = h
        g.edata['h'] = e
>       g.update_all(message_func=fn.src_mul_edge('h', 'h', 'sum'),
                     reduce_func=fn.sum('sum', 'h')) # 1

tests/compute/test_specialization.py:271:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:323: in invoke_gspmm
    z = op(graph, x, y)
python/dgl/ops/spmm.py:147: in func
    return gspmm(g, binary_op, reduce_op, x, y)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[ 0.1781, -1.7383,  0.5895, -0.7099, -0.3568], [ 0.6212,  0.1393, -0.3065, -1.9176, -1.1655], ...793], [ 0.1927,  0.7801,  0.6507,  0.0136,  0.2582], [-0.5435,  0.6627,  0.4242,  1.0154,  0.3597]]])
rhs_data = tensor([[[ 1.4667e+00]], [[-1.7203e+00]], [[-4.8379e-01]], [[-7.1619e-01]], [[-9.4... 1.0453e+00]], [[ 1.0157e+00]], [[-1.0315e+00]], [[ 9.9752e-01]], [[-4.0651e-02]]])

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
__________________________ test_spmv_3d_feat[idtype1] __________________________

idtype = torch.int64

    (test body and traceback identical to test_spmv_3d_feat[idtype0] above)
gidx =
op = 'mul', reduce_op = 'sum'
lhs_data = tensor([[[-1.2946,  2.1732,  0.7860,  0.4174, -0.6091], [ 1.0391, -0.0897,  0.3572, -0.1215,  1.1465], ...113], [ 0.0366,  1.7481, -0.2313, -0.8954,  0.1273], [ 0.0860, -0.1194, -0.4341, -1.7336, -0.7365]]])
rhs_data = tensor([[[-1.0758e+00]], [[-1.0779e+00]], [[-1.2231e+00]], [[-2.1482e+00]], [[ 3.0...-9.5957e-01]], [[ 2.0114e+00]], [[-1.7067e+00]], [[ 1.0071e+00]], [[-7.6480e-01]]])
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
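Editorial note: test_spmv_3d_feat never reaches its real target, the broadcast of a scalar edge weight against a (5, 5) per-node feature. The shape arithmetic it would exercise, as a standalone sketch with assumed sizes (6 edges instead of the test's random count):

    import torch

    h_src = torch.randn(6, 5, 5)   # gathered source features, one per edge
    e = torch.randn(6)             # one scalar weight per edge
    m = h_src * e.unsqueeze(1).unsqueeze(2)   # (6, 5, 5) * (6, 1, 1)
    assert m.shape == (6, 5, 5)

The double unsqueeze is exactly what the test's src_mul_edge_udf does with F.unsqueeze.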
_______________________________ test_khop_graph ________________________________

    @unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
    def test_khop_graph():
        N = 20
        feat = F.randn((N, 5))
        def _test(g):
            for k in range(4):
                g_k = dgl.khop_graph(g, k)
                # use original graph to do message passing for k times.
                g.ndata['h'] = feat
                for _ in range(k):
                    g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
                h_0 = g.ndata.pop('h')
                # use k-hop graph to do message passing for one time.
                g_k.ndata['h'] = feat
                g_k.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
                h_1 = g_k.ndata.pop('h')
                assert F.allclose(h_0, h_1, rtol=1e-3, atol=1e-3)
        # Test for random undirected graphs
        g = dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3))
>       _test(g)

tests/compute/test_transform.py:471:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/compute/test_transform.py:465: in _test
    g_k.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-0.5472,  0.0936,  0.8332,  0.2144,  1.4115], [ 1.5455, -0.3460, -1.0035, -0.6472, -0.4461], ...0.9718], [-1.0126, -0.3500,  1.0720, -0.9389,  1.1804], [-0.1025,  0.7345, -0.1483,  1.0094, -0.1806]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
________________________________ test_khop_adj _________________________________

    @unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
    def test_khop_adj():
        N = 20
        feat = F.randn((N, 5))
        g = dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3))
        for k in range(3):
            adj = F.tensor(F.swapaxes(dgl.khop_adj(g, k), 0, 1))
            # use original graph to do message passing for k times.
            g.ndata['h'] = feat
            for _ in range(k):
>               g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))

tests/compute/test_transform.py:486:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 2.7372e+00, -8.0551e-02, -1.7735e+00, -1.2046e+00, -8.0728e-02], [-1.0605e+00, -3.3512e-01, -8.3698e...01, -1.0084e+00, -9.4934e-01,  9.5661e-01], [-1.5257e+00,  4.6575e-02,  1.4175e+00,  9.3071e-01,  7.3970e-01]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
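Editorial note: both k-hop tests fail before checking their actual invariant, namely that k rounds of copy_u/sum message passing equal a single multiplication by the k-th power of the transposed adjacency matrix. In dense form, as a small sketch with an assumed three-node graph:

    import torch

    A = torch.tensor([[0., 1., 0.],
                      [0., 0., 1.],
                      [0., 0., 0.]])   # edges 0->1, 1->2
    h = torch.randn(3, 2)

    h_k = h
    for _ in range(2):                 # two rounds of in-neighbor sums
        h_k = A.t() @ h_k

    assert torch.allclose(h_k, torch.matrix_power(A.t(), 2) @ h)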
___________________________ test_to_simple[idtype0] ____________________________

idtype = torch.int32

    @unittest.skipIf(F._default_context_str == 'gpu', reason="GPU to simple not implemented")
    @parametrize_idtype
    def test_to_simple(idtype):
        # homogeneous graph
        g = dgl.graph((F.tensor([0, 1, 2, 1]), F.tensor([1, 2, 0, 2])))
        g.ndata['h'] = F.tensor([[0.], [1.], [2.]])
        g.edata['h'] = F.tensor([[3.], [4.], [5.], [6.]])
        sg, wb = dgl.to_simple(g, writeback_mapping=True)
        u, v = g.all_edges(form='uv', order='eid')
        u = F.asnumpy(u).tolist()
        v = F.asnumpy(v).tolist()
        uv = list(zip(u, v))
        eid_map = F.asnumpy(wb)
        su, sv = sg.all_edges(form='uv', order='eid')
        su = F.asnumpy(su).tolist()
        sv = F.asnumpy(sv).tolist()
        suv = list(zip(su, sv))
        sc = F.asnumpy(sg.edata['count'])
        assert set(uv) == set(suv)
        for i, e in enumerate(suv):
            assert sc[i] == sum(e == _e for _e in uv)
        for i, e in enumerate(uv):
            assert eid_map[i] == suv.index(e)
        # shared ndata
        assert F.array_equal(sg.ndata['h'], g.ndata['h'])
        assert 'h' not in sg.edata
        # new ndata to sg
        sg.ndata['hh'] = F.tensor([[0.], [1.], [2.]])
        assert 'hh' not in g.ndata
        sg = dgl.to_simple(g, writeback_mapping=False, copy_ndata=False)
        assert 'h' not in sg.ndata
        assert 'h' not in sg.edata
        # test coalesce edge feature
        sg = dgl.to_simple(g, copy_edata=True, aggregator='arbitrary')
        assert F.allclose(sg.edata['h'][1], F.tensor([4.]))
>       sg = dgl.to_simple(g, copy_edata=True, aggregator='sum')

tests/compute/test_transform.py:816:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/transforms/functional.py:2625: in to_simple
    new_edge_frames = _coalesce_edge_frame(g, edge_maps, counts, aggregator)
python/dgl/transforms/functional.py:2453: in _coalesce_edge_frame
    new_data = F.scatter_add(data, feat_idx, _num_rows)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
x = tensor([[3.], [4.], [5.], [6.]])
idx = tensor([0, 1, 2, 1]), m = 3

    def scatter_add(x, idx, m):
        args = _cast_if_autocast_enabled(x, idx, m)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:785: TypeError
___________________________ test_to_simple[idtype1] ____________________________

idtype = torch.int64

    (test body and traceback identical to test_to_simple[idtype0] above)
x = tensor([[3.], [4.], [5.], [6.]])
idx = tensor([0, 1, 2, 1]), m = 3
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:785: TypeError
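Editorial note: the to_simple failures hit the same broken autocast fallback, this time inside F.scatter_add. For the locals in the traceback the expected result is easy to state: rows of x that share an index in idx are summed, which is how aggregator='sum' merges the duplicated edge (1, 2). A plain-PyTorch reference sketch:

    import torch

    x = torch.tensor([[3.], [4.], [5.], [6.]])
    idx = torch.tensor([0, 1, 2, 1])
    m = 3

    out = torch.zeros(m, 1)
    out.index_add_(0, idx, x)   # rows 1 and 3 of x share index 1: 4 + 6
    print(out)                  # tensor([[ 3.], [10.], [ 5.]])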
F.copy_to(F.tensor([1, 2], dtype=idtype), ctx=F.ctx()) g = dgl.remove_edges(g, 1) assert g.number_of_edges() == 1 assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1], dtype=idtype)) assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 2], dtype=idtype)) assert F.array_equal(g.edata['h'], F.tensor([1], dtype=idtype)) # heterogeneous graph g = create_test_heterograph3(idtype) g.edges['plays'].data['h'] = F.copy_to(F.tensor([1, 2, 3, 4], dtype=idtype), ctx=F.ctx()) g = dgl.remove_edges(g, 1, etype='plays') assert g.number_of_edges('plays') == 3 u, v = g.edges(form='uv', order='eid', etype='plays') assert F.array_equal(u, F.tensor([0, 1, 2], dtype=idtype)) assert F.array_equal(v, F.tensor([0, 1, 1], dtype=idtype)) assert F.array_equal(g.edges['plays'].data['h'], F.tensor([1, 3, 4], dtype=idtype)) # remove all edges of 'develops' g = dgl.remove_edges(g, [0, 1], etype='develops') assert g.number_of_edges('develops') == 0 assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 1], dtype=idtype)) assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2], dtype=idtype)) assert F.array_equal(g.nodes['developer'].data['h'], F.tensor([3, 3], dtype=idtype)) # batched graph ctx = F.ctx() g1 = dgl.graph(([0, 1], [1, 2]), num_nodes=5, idtype=idtype, device=ctx) g2 = dgl.graph(([], []), idtype=idtype, device=ctx) g3 = dgl.graph(([2, 3, 4], [3, 2, 1]), idtype=idtype, device=ctx) bg = dgl.batch([g1, g2, g3]) > bg_r = dgl.remove_edges(bg, 2) tests/compute/test_transform.py:1389: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/transforms/functional.py:1755: in remove_edges g.remove_edges(eids, etype=etype, store_ids=store_ids) python/dgl/heterograph.py:648: in remove_edges one_hot_removed_edges, reducer='sum') python/dgl/ops/segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'sum', x = tensor([0., 0., 1., 0., 0.]), offsets = tensor([0, 2, 2, 5]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError __________________________ test_remove_edges[idtype1] __________________________ idtype = torch.int64 @parametrize_idtype def test_remove_edges(idtype): # homogeneous Graphs g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx()) e = 0 g = dgl.remove_edges(g, e) assert g.number_of_edges() == 1 u, v = g.edges(form='uv', order='eid') assert F.array_equal(u, F.tensor([1], dtype=idtype)) assert F.array_equal(v, F.tensor([2], dtype=idtype)) g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx()) e = [0] g = dgl.remove_edges(g, e) assert g.number_of_edges() == 1 u, v = g.edges(form='uv', order='eid') assert F.array_equal(u, F.tensor([1], dtype=idtype)) assert F.array_equal(v, F.tensor([2], dtype=idtype)) e = F.tensor([0], dtype=idtype) g = dgl.remove_edges(g, e) assert g.number_of_edges() == 0 # has node data g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx()) g.ndata['h'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx()) g = dgl.remove_edges(g, 1) assert g.number_of_edges() == 1 assert F.array_equal(g.ndata['h'], F.tensor([1, 2, 3], dtype=idtype)) # has edge data g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx()) g.edata['h'] = F.copy_to(F.tensor([1, 2], dtype=idtype), ctx=F.ctx()) g = 
dgl.remove_edges(g, 0) assert g.number_of_edges() == 1 assert F.array_equal(g.edata['h'], F.tensor([2], dtype=idtype)) # invalid eid assert_fail = False try: g = dgl.remove_edges(g, 1) except: assert_fail = True assert assert_fail # bipartite graph g = dgl.heterograph( {('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx()) e = 0 g = dgl.remove_edges(g, e) assert g.number_of_edges() == 1 u, v = g.edges(form='uv', order='eid') assert F.array_equal(u, F.tensor([1], dtype=idtype)) assert F.array_equal(v, F.tensor([2], dtype=idtype)) g = dgl.heterograph( {('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx()) e = [0] g = dgl.remove_edges(g, e) assert g.number_of_edges() == 1 u, v = g.edges(form='uv', order='eid') assert F.array_equal(u, F.tensor([1], dtype=idtype)) assert F.array_equal(v, F.tensor([2], dtype=idtype)) e = F.tensor([0], dtype=idtype) g = dgl.remove_edges(g, e) assert g.number_of_edges() == 0 # has data g = dgl.heterograph( {('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx()) g.nodes['user'].data['h'] = F.copy_to(F.tensor([1, 1], dtype=idtype), ctx=F.ctx()) g.nodes['game'].data['h'] = F.copy_to(F.tensor([2, 2, 2], dtype=idtype), ctx=F.ctx()) g.edata['h'] = F.copy_to(F.tensor([1, 2], dtype=idtype), ctx=F.ctx()) g = dgl.remove_edges(g, 1) assert g.number_of_edges() == 1 assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1], dtype=idtype)) assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 2], dtype=idtype)) assert F.array_equal(g.edata['h'], F.tensor([1], dtype=idtype)) # heterogeneous graph g = create_test_heterograph3(idtype) g.edges['plays'].data['h'] = F.copy_to(F.tensor([1, 2, 3, 4], dtype=idtype), ctx=F.ctx()) g = dgl.remove_edges(g, 1, etype='plays') assert g.number_of_edges('plays') == 3 u, v = g.edges(form='uv', order='eid', etype='plays') assert F.array_equal(u, F.tensor([0, 1, 2], dtype=idtype)) assert F.array_equal(v, F.tensor([0, 1, 1], dtype=idtype)) assert F.array_equal(g.edges['plays'].data['h'], F.tensor([1, 3, 4], dtype=idtype)) # remove all edges of 'develops' g = dgl.remove_edges(g, [0, 1], etype='develops') assert g.number_of_edges('develops') == 0 assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 1], dtype=idtype)) assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2], dtype=idtype)) assert F.array_equal(g.nodes['developer'].data['h'], F.tensor([3, 3], dtype=idtype)) # batched graph ctx = F.ctx() g1 = dgl.graph(([0, 1], [1, 2]), num_nodes=5, idtype=idtype, device=ctx) g2 = dgl.graph(([], []), idtype=idtype, device=ctx) g3 = dgl.graph(([2, 3, 4], [3, 2, 1]), idtype=idtype, device=ctx) bg = dgl.batch([g1, g2, g3]) > bg_r = dgl.remove_edges(bg, 2) tests/compute/test_transform.py:1389: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ python/dgl/transforms/functional.py:1755: in remove_edges g.remove_edges(eids, etype=etype, store_ids=store_ids) python/dgl/heterograph.py:648: in remove_edges one_hot_removed_edges, reducer='sum') python/dgl/ops/segment.py:52: in segment_reduce rst = F.segment_reduce(reducer, value, offsets) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ op = 'sum', x = tensor([0., 0., 1., 0., 0.]), offsets = tensor([0, 2, 2, 5]) def segment_reduce(op, x, offsets): args = _cast_if_autocast_enabled(op, x, offsets) > with autocast(enabled=False): E TypeError: empty_context() got an unexpected keyword argument 'enabled' python/dgl/backend/pytorch/sparse.py:780: TypeError 
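Every failure above bottoms out in python/dgl/backend/pytorch/sparse.py, where each op wraps its work in `with autocast(enabled=False):`. On builds where the real torch.cuda.amp.autocast is not used, the backend substitutes a stub context manager (the `empty_context` named in the error), and that stub does not accept the `enabled` keyword. A minimal sketch of a keyword-tolerant stub that would satisfy these call sites (illustrative only, not necessarily the fix this PR makes):

    from contextlib import contextmanager

    @contextmanager
    def empty_context(*args, **kwargs):
        # Accept and ignore any arguments (e.g. enabled=False) so callers
        # written against the torch.cuda.amp.autocast signature still work.
        yield

With such a stub, call sites like `with autocast(enabled=False):` run their body as a no-op context instead of raising TypeError.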
__________________________ test_remove_nodes[idtype0] __________________________

idtype = torch.int32

    @parametrize_idtype
    def test_remove_nodes(idtype):
        # homogeneous Graphs
        g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
        n = 0
        g = dgl.remove_nodes(g, n)
        assert g.number_of_nodes() == 2
        assert g.number_of_edges() == 1
        u, v = g.edges(form='uv', order='eid')
        assert F.array_equal(u, F.tensor([0], dtype=idtype))
        assert F.array_equal(v, F.tensor([1], dtype=idtype))
        g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
        n = [1]
        g = dgl.remove_nodes(g, n)
        assert g.number_of_nodes() == 2
        assert g.number_of_edges() == 0
        g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
        n = F.tensor([2], dtype=idtype)
        g = dgl.remove_nodes(g, n)
        assert g.number_of_nodes() == 2
        assert g.number_of_edges() == 1
        u, v = g.edges(form='uv', order='eid')
        assert F.array_equal(u, F.tensor([0], dtype=idtype))
        assert F.array_equal(v, F.tensor([1], dtype=idtype))
        # invalid nid
        assert_fail = False
        try:
            g.remove_nodes(3)
        except:
            assert_fail = True
        assert assert_fail
        # has node and edge data
        g = dgl.graph(([0, 0, 2], [0, 1, 2]), idtype=idtype, device=F.ctx())
        g.ndata['hv'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
        g.edata['he'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
        g = dgl.remove_nodes(g, F.tensor([0], dtype=idtype))
        assert g.number_of_nodes() == 2
        assert g.number_of_edges() == 1
        u, v = g.edges(form='uv', order='eid')
        assert F.array_equal(u, F.tensor([1], dtype=idtype))
        assert F.array_equal(v, F.tensor([1], dtype=idtype))
        assert F.array_equal(g.ndata['hv'], F.tensor([2, 3], dtype=idtype))
        assert F.array_equal(g.edata['he'], F.tensor([3], dtype=idtype))
        # node id larger than current max node id
        g = dgl.heterograph(
            {('user', 'plays', 'game'): ([0, 1], [1, 2])},
            idtype=idtype, device=F.ctx())
        n = 0
        g = dgl.remove_nodes(g, n, ntype='user')
        assert g.number_of_nodes('user') == 1
        assert g.number_of_nodes('game') == 3
        assert g.number_of_edges() == 1
        u, v = g.edges(form='uv', order='eid')
        assert F.array_equal(u, F.tensor([0], dtype=idtype))
        assert F.array_equal(v, F.tensor([2], dtype=idtype))
        g = dgl.heterograph(
            {('user', 'plays', 'game'): ([0, 1], [1, 2])},
            idtype=idtype, device=F.ctx())
        n = [1]
        g = dgl.remove_nodes(g, n, ntype='user')
        assert g.number_of_nodes('user') == 1
        assert g.number_of_nodes('game') == 3
        assert g.number_of_edges() == 1
        u, v = g.edges(form='uv', order='eid')
        assert F.array_equal(u, F.tensor([0], dtype=idtype))
        assert F.array_equal(v, F.tensor([1], dtype=idtype))
        g = dgl.heterograph(
            {('user', 'plays', 'game'): ([0, 1], [1, 2])},
            idtype=idtype, device=F.ctx())
        n = F.tensor([0], dtype=idtype)
        g = dgl.remove_nodes(g, n, ntype='game')
        assert g.number_of_nodes('user') == 2
        assert g.number_of_nodes('game') == 2
        assert g.number_of_edges() == 2
        u, v = g.edges(form='uv', order='eid')
        assert F.array_equal(u, F.tensor([0, 1], dtype=idtype))
        assert F.array_equal(v, F.tensor([0, 1], dtype=idtype))
        # heterogeneous graph
        g = create_test_heterograph3(idtype)
        g.edges['plays'].data['h'] = F.copy_to(F.tensor([1, 2, 3, 4], dtype=idtype), ctx=F.ctx())
        g = dgl.remove_nodes(g, 0, ntype='game')
        assert g.number_of_nodes('user') == 3
        assert g.number_of_nodes('game') == 1
        assert g.number_of_nodes('developer') == 2
        assert g.number_of_edges('plays') == 2
        assert g.number_of_edges('develops') == 1
        assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 1], dtype=idtype))
        assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2], dtype=idtype))
        assert F.array_equal(g.nodes['developer'].data['h'], F.tensor([3, 3], dtype=idtype))
        u, v = g.edges(form='uv', order='eid', etype='plays')
        assert F.array_equal(u, F.tensor([1, 2], dtype=idtype))
        assert F.array_equal(v, F.tensor([0, 0], dtype=idtype))
        assert F.array_equal(g.edges['plays'].data['h'], F.tensor([3, 4], dtype=idtype))
        u, v = g.edges(form='uv', order='eid', etype='develops')
        assert F.array_equal(u, F.tensor([1], dtype=idtype))
        assert F.array_equal(v, F.tensor([0], dtype=idtype))
        # batched graph
        ctx = F.ctx()
        g1 = dgl.graph(([0, 1], [1, 2]), num_nodes=5, idtype=idtype, device=ctx)
        g2 = dgl.graph(([], []), idtype=idtype, device=ctx)
        g3 = dgl.graph(([2, 3, 4], [3, 2, 1]), idtype=idtype, device=ctx)
        bg = dgl.batch([g1, g2, g3])
>       bg_r = dgl.remove_nodes(bg, 1)

tests/compute/test_transform.py:1565:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/transforms/functional.py:1835: in remove_nodes
    g.remove_nodes(nids, ntype=ntype, store_ids=store_ids)
python/dgl/heterograph.py:774: in remove_nodes
    c_ntype_batch_num_nodes, one_hot_removed_nodes, reducer='sum')
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum', x = tensor([0., 1., 0., 0., 0., 0., 0., 0., 0., 0.])
offsets = tensor([ 0,  5,  5, 10])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
__________________________ test_remove_nodes[idtype1] __________________________

idtype = torch.int64

>       bg_r = dgl.remove_nodes(bg, 1)

tests/compute/test_transform.py:1565:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
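Both remove_edges and remove_nodes die in the same frame: segment_reduce pins its reduction outside autocast so it runs in full precision on the batch bookkeeping tensors. A toy CPU sketch of what segment_reduce('sum', x, offsets) computes, using the locals from the failure above (the helper name segment_sum is mine, not DGL's API):

    import torch

    def segment_sum(x, offsets):
        # offsets delimit segments of x: segment i is x[offsets[i]:offsets[i+1]].
        # The `with autocast(enabled=False):` below is exactly the call
        # pattern that breaks when autocast is a kwarg-less stub.
        with torch.cuda.amp.autocast(enabled=False):
            m = offsets.numel() - 1
            seg_ids = torch.repeat_interleave(
                torch.arange(m), offsets[1:] - offsets[:-1])
            return torch.zeros(m, dtype=x.dtype).index_add(0, seg_ids, x)

    # Segments of sizes 2, 0, 3, as in the traceback's locals.
    out = segment_sum(torch.tensor([0., 0., 1., 0., 0.]),
                      torch.tensor([0, 2, 2, 5]))
    assert torch.equal(out, torch.tensor([0., 0., 1.]))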
__________________________ test_add_selfloop[idtype0] __________________________

idtype = torch.int32

    @parametrize_idtype
    def test_add_selfloop(idtype):
        # homogeneous graph
        # test for fill_data is float
        g = dgl.graph(([0, 0, 2], [2, 1, 0]), idtype=idtype, device=F.ctx())
        g.edata['he'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
        g.edata['he1'] = F.copy_to(F.tensor([[0., 1.], [2., 3.], [4., 5.]]), ctx=F.ctx())
        g.ndata['hn'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
        g = dgl.add_self_loop(g)
        assert g.number_of_nodes() == 3
        assert g.number_of_edges() == 6
        u, v = g.edges(form='uv', order='eid')
        assert F.array_equal(u, F.tensor([0, 0, 2, 0, 1, 2], dtype=idtype))
        assert F.array_equal(v, F.tensor([2, 1, 0, 0, 1, 2], dtype=idtype))
        assert F.array_equal(g.edata['he'], F.tensor([1, 2, 3, 1, 1, 1], dtype=idtype))
        assert F.array_equal(g.edata['he1'], F.tensor([[0., 1.], [2., 3.], [4., 5.], [1., 1.], [1., 1.], [1., 1.]]))
        # test for fill_data is int
        g = dgl.graph(([0, 0, 2], [2, 1, 0]), idtype=idtype, device=F.ctx())
        g.edata['he'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
        g.edata['he1'] = F.copy_to(F.tensor([[0, 1], [2, 3], [4, 5]], dtype=idtype), ctx=F.ctx())
        g.ndata['hn'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
        g = dgl.add_self_loop(g, fill_data=1)
        assert g.number_of_nodes() == 3
        assert g.number_of_edges() == 6
        u, v = g.edges(form='uv', order='eid')
        assert F.array_equal(u, F.tensor([0, 0, 2, 0, 1, 2], dtype=idtype))
        assert F.array_equal(v, F.tensor([2, 1, 0, 0, 1, 2], dtype=idtype))
        assert F.array_equal(g.edata['he'], F.tensor([1, 2, 3, 1, 1, 1], dtype=idtype))
        assert F.array_equal(g.edata['he1'], F.tensor([[0, 1], [2, 3], [4, 5], [1, 1], [1, 1], [1, 1]], dtype=idtype))
        # test for fill_data is str
        g = dgl.graph(([0, 0, 2], [2, 1, 0]), idtype=idtype, device=F.ctx())
        g.edata['he'] = F.copy_to(F.tensor([1., 2., 3.]), ctx=F.ctx())
        g.edata['he1'] = F.copy_to(F.tensor([[0., 1.], [2., 3.], [4., 5.]]), ctx=F.ctx())
        g.ndata['hn'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
>       g = dgl.add_self_loop(g, fill_data='sum')

tests/compute/test_transform.py:1675:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/transforms/functional.py:1950: in add_self_loop
    g.update_all(function.copy_e(feat_name, "h"), reducer('h', 'h'), etype=etype)
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:191: in func
    return gspmm(g, 'copy_rhs', reduce_op, None, x)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([1., 2., 3.])

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
__________________________ test_add_selfloop[idtype1] __________________________

idtype = torch.int64

>       g = dgl.add_self_loop(g, fill_data='sum')

tests/compute/test_transform.py:1675:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
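The gspmm wrapper quoted in these tracebacks only dispatches add/mul (plus the copy ops) to the backend: it rewrites 'sub' into 'add' with a negated right operand and 'div' into 'mul' with a reciprocal. A standalone restatement of that rewrite (the helper name canonicalize is mine):

    import torch

    def canonicalize(op, rhs):
        # 'sub' -> 'add' with negated rhs; 'div' -> 'mul' with reciprocal
        # rhs, mirroring the branch at the top of gspmm above.
        if op == 'sub':
            return 'add', -rhs
        if op == 'div':
            return 'mul', 1. / rhs
        return op, rhs

    op, rhs = canonicalize('div', torch.tensor([2., 4.]))
    assert op == 'mul' and torch.allclose(rhs, torch.tensor([0.5, 0.25]))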
________________________ test_remove_selfloop[idtype0] _________________________

idtype = torch.int32

    @parametrize_idtype
    def test_remove_selfloop(idtype):
        # homogeneous graph
        g = dgl.graph(([0, 0, 0, 1], [1, 0, 0, 2]), idtype=idtype, device=F.ctx())
        g.edata['he'] = F.copy_to(F.tensor([1, 2, 3, 4], dtype=idtype), ctx=F.ctx())
        g = dgl.remove_self_loop(g)
        assert g.number_of_nodes() == 3
        assert g.number_of_edges() == 2
        assert F.array_equal(g.edata['he'], F.tensor([1, 4], dtype=idtype))
        # bipartite graph
        g = dgl.heterograph(
            {('user', 'plays', 'game'): ([0, 1, 2], [1, 2, 2])},
            idtype=idtype, device=F.ctx())
        # nothing will happen
        raise_error = False
        try:
            g = dgl.remove_self_loop(g, etype='plays')
        except:
            raise_error = True
        assert raise_error
        g = create_test_heterograph4(idtype)
        g = dgl.remove_self_loop(g, etype='follows')
        assert g.number_of_nodes('user') == 3
        assert g.number_of_nodes('game') == 2
        assert g.number_of_edges('follows') == 2
        assert g.number_of_edges('plays') == 2
        u, v = g.edges(form='uv', order='eid', etype='follows')
        assert F.array_equal(u, F.tensor([1, 2], dtype=idtype))
        assert F.array_equal(v, F.tensor([0, 1], dtype=idtype))
        assert F.array_equal(g.edges['follows'].data['h'], F.tensor([2, 4], dtype=idtype))
        assert F.array_equal(g.edges['plays'].data['h'], F.tensor([1, 2], dtype=idtype))
        raise_error = False
        try:
            g = dgl.remove_self_loop(g, etype='plays')
        except:
            raise_error = True
        assert raise_error
        # batch information
        g = dgl.graph(([0, 0, 0, 1, 3, 3, 4], [1, 0, 0, 2, 3, 4, 4]),
                      idtype=idtype, device=F.ctx())
        g.set_batch_num_nodes(F.tensor([3, 2], dtype=F.int64))
        g.set_batch_num_edges(F.tensor([4, 3], dtype=F.int64))
>       g = dgl.remove_self_loop(g)

tests/compute/test_transform.py:1804:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/transforms/functional.py:2032: in remove_self_loop
    new_g = remove_edges(g, self_loop_eids, etype=etype)
python/dgl/transforms/functional.py:1755: in remove_edges
    g.remove_edges(eids, etype=etype, store_ids=store_ids)
python/dgl/heterograph.py:648: in remove_edges
    one_hot_removed_edges, reducer='sum')
python/dgl/ops/segment.py:52: in segment_reduce
    rst = F.segment_reduce(reducer, value, offsets)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

op = 'sum', x = tensor([0., 1., 1., 0., 1., 0., 1.])
offsets = tensor([0, 4, 7])

    def segment_reduce(op, x, offsets):
        args = _cast_if_autocast_enabled(op, x, offsets)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
________________________ test_remove_selfloop[idtype1] _________________________

idtype = torch.int64

>       g = dgl.remove_self_loop(g)

tests/compute/test_transform.py:1804:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:780: TypeError
_________________________ test_module_gcnnorm[idtype0] _________________________

idtype = torch.int32

    @parametrize_idtype
    def test_module_gcnnorm(idtype):
        g = dgl.heterograph({
            ('A', 'r1', 'A'): ([0, 1, 2], [0, 0, 1]),
            ('A', 'r2', 'B'): ([0, 0], [1, 1]),
            ('B', 'r3', 'B'): ([0, 1, 2], [0, 0, 1])
        }, idtype=idtype, device=F.ctx())
        g.edges['r3'].data['w'] = F.tensor([0.1, 0.2, 0.3])
        transform = dgl.GCNNorm()
>       new_g = transform(g)

tests/compute/test_transform.py:2305:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/transforms/module.py:1043: in __call__
    result[c_etype] = self.calc_etype(c_etype, g)
python/dgl/transforms/module.py:1025: in calc_etype
    g.update_all(fn.copy_e(self.eweight_name, 'm'), fn.sum('m', 'deg'), etype=c_etype)
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:191: in func
    return gspmm(g, 'copy_rhs', reduce_op, None, x)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([0.1000, 0.2000, 0.3000])

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
_________________________ test_module_gcnnorm[idtype1] _________________________

idtype = torch.int64

>       new_g = transform(g)

tests/compute/test_transform.py:2305:
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
_____________________________ test_module_sign[g0] _____________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})

    @unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
    @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
    def test_module_sign(g):
        import torch
        atol = 1e-06
        ctx = F.ctx()
        g = g.to(ctx)
        adj = g.adj(transpose=True, scipy_fmt='coo').todense()
        adj = torch.tensor(adj).float().to(ctx)
        weight_adj = g.adj(transpose=True, scipy_fmt='coo').astype(float).todense()
        weight_adj = torch.tensor(weight_adj).float().to(ctx)
        src, dst = g.edges()
        src, dst = src.long(), dst.long()
        weight_adj[dst, src] = g.edata['scalar_w']
        # raw
        transform = dgl.SIGNDiffusion(k=1, in_feat_name='h', diffuse_op='raw')
>       g = transform(g)

tests/compute/test_transform.py:2476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/dgl/transforms/module.py:1593: in __call__
    feat_list = self.diffuse(g)
python/dgl/transforms/module.py:1611: in raw
    g.update_all(message_func, fn.sum('m', self.in_feat_name))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.5296, -0.6817],
        [ 0.9662,  0.1646],
        [ 0.9910,  1.7198],
        [-0.8319, -1.1789],
        ...3282],
        [-0.8515, -0.5536],
        [ 0.4702,  1.1942],
        [ 0.8706,  0.7244],
        [ 1.5997, -0.1258]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
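All of the failures above reduce to the same mismatch between the autocast stub and its call sites. One hedged way to structure the guard, in the same LooseVersion style the backend already uses for other version gates (visible in the warnings below); the 1.10.0 cutoff here is illustrative, and this sketch is not necessarily the actual patch:

    from contextlib import contextmanager
    from distutils.version import LooseVersion
    import torch as th

    if LooseVersion(th.__version__) >= LooseVersion("1.10.0"):
        # The real autocast accepts the `enabled` keyword.
        from torch.cuda.amp import autocast
    else:
        @contextmanager
        def autocast(enabled=True, **kwargs):
            # No-op fallback with an autocast-compatible signature.
            yield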
=============================== warnings summary ===============================
python/dgl/backend/backend.py:1741
  /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/backend/backend.py:1741: DeprecationWarning: invalid escape sequence \P
    """

python/dgl/backend/pytorch/tensor.py:16
python/dgl/backend/pytorch/tensor.py:16
  /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/backend/pytorch/tensor.py:16: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(th.__version__) < LooseVersion("1.9.0"):

python/dgl/backend/pytorch/tensor.py:340
python/dgl/backend/pytorch/tensor.py:340
  /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/backend/pytorch/tensor.py:340: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(th.__version__) >= LooseVersion("1.10.0"):

python/dgl/dataloading/dataloader.py:33
  /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/dataloading/dataloader.py:33: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_VER = LooseVersion(torch.__version__)

python/dgl/_dataloading/pytorch/dataloader.py:23
  /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/_dataloading/pytorch/dataloader.py:23: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_VER = LooseVersion(th.__version__)

python/dgl/_dataloading/pytorch/dataloader.py:24
  /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/_dataloading/pytorch/dataloader.py:24: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_16 = PYTORCH_VER >= LooseVersion("1.6.0")

python/dgl/_dataloading/pytorch/dataloader.py:25
  /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/_dataloading/pytorch/dataloader.py:25: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_17 = PYTORCH_VER >= LooseVersion("1.7.0")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _nlv = LooseVersion(_np_version)

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p16 = _nlv < LooseVersion("1.16")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p17 = _nlv < LooseVersion("1.17")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p18 = _nlv < LooseVersion("1.18")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p19 = _nlv < LooseVersion("1.19")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p20 = _nlv < LooseVersion("1.20")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    other = LooseVersion(other)

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125
../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(_np_version) >= LooseVersion("1.17.0"):
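Most of the DeprecationWarnings above come from one pattern: distutils' LooseVersion. The replacement the warning itself names is packaging.version; a hedged sketch of the mechanical swap (assuming the packaging dependency is acceptable):

    from packaging import version
    import torch as th

    # version.parse() also handles loose strings such as '1.10.0+cu113'.
    if version.parse(th.__version__) < version.parse("1.9.0"):
        pass  # older-torch code path goes here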
Please use DGLGraph.add_edges") tests/compute/test_basics.py::test_update_all_0deg[idtype0] tests/compute/test_basics.py::test_update_all_0deg[idtype1] tests/compute/test_basics.py::test_pull_0deg[idtype0] tests/compute/test_basics.py::test_pull_0deg[idtype1] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype0] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype1] /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/core.py:79: DGLWarning: The input graph for the user-defined edge function does not contain valid edges dgl_warning('The input graph for the user-defined edge function ' \ tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype0] tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype1] tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query tests/compute/test_transform.py::test_no_backtracking tests/compute/test_transform.py::test_reverse[idtype0] tests/compute/test_transform.py::test_reverse[idtype1] /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/heterograph.py:2978: DGLWarning: DGLGraph.edge_id is deprecated. Please use DGLGraph.edge_ids. dgl_warning("DGLGraph.edge_id is deprecated. Please use DGLGraph.edge_ids.") tests/compute/test_batched_heterograph.py::test_features[idtype0] tests/compute/test_batched_heterograph.py::test_features[idtype1] /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/batch.py:159: DGLWarning: Arguments edge_attrs has been deprecated. Please use edata instead. dgl_warning('Arguments edge_attrs has been deprecated. Please use' tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype0] tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype1] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype0] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype1] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype0] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype1] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype0] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype1] /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/heterograph_index.py:797: FutureWarning: Adjacency matrix by default currently returns edge IDs. As a result there is one 0 entry which is not eliminated. In the next release it will return 1s by default, and 0 will be eliminated otherwise. FutureWarning) tests/compute/test_data.py::test_minigc /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/data/minigc.py:159: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations self.labels = F.tensor(np.array(self.labels).astype(np.int)) tests/compute/test_data.py::test_fraud tests/compute/test_data.py::test_fraud /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/data/fraud.py:206: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. 
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations train_mask = np.zeros(N, dtype=np.bool) tests/compute/test_data.py::test_fraud tests/compute/test_data.py::test_fraud /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/data/fraud.py:207: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations val_mask = np.zeros(N, dtype=np.bool) tests/compute/test_data.py::test_fraud tests/compute/test_data.py::test_fraud /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/data/fraud.py:208: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations test_mask = np.zeros(N, dtype=np.bool) tests/compute/test_data.py::test_fakenews tests/compute/test_data.py::test_fakenews /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/data/fakenews.py:154: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations train_mask = np.zeros(num_graphs, dtype=np.bool) tests/compute/test_data.py::test_fakenews tests/compute/test_data.py::test_fakenews /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/data/fakenews.py:155: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations val_mask = np.zeros(num_graphs, dtype=np.bool) tests/compute/test_data.py::test_fakenews tests/compute/test_data.py::test_fakenews /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/data/fakenews.py:156: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations test_mask = np.zeros(num_graphs, dtype=np.bool) tests/compute/test_data.py::test_citation_graph /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/data/citation_graph.py:287: RuntimeWarning: divide by zero encountered in power r_inv = np.power(rowsum, -1).flatten() tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_data.py:1194: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.float64` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations node_ids = np.arange(num_nodes, dtype=np.float) tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_data.py:1231: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations src_ids = np.random.randint(num_nodes, size=num_edges).astype(np.float) tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_data.py:1232: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations dst_ids = np.random.randint(num_nodes, size=num_edges).astype(np.float) tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_data.py:1262: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations graph_ids = np.arange(num_graphs).astype(np.float) tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_data.py:513: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations node_ids = np.array([], dtype=np.int) tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_data.py:514: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations src_ids = np.array([], dtype=np.int) tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_data.py:515: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. 
`np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations dst_ids = np.array([], dtype=np.int) tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_data.py:516: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations ngraph_ids = np.array([], dtype=np.int) tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_data.py:517: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations egraph_ids = np.array([], dtype=np.int) tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_data.py:518: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations u_indices = np.array([], dtype=np.int) tests/compute/test_data.py::test_csvdataset /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/numpy/lib/arraysetops.py:604: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison mask &= (ar1 != a) tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/data/csv_dataset_base.py:298: DGLWarning: Unamed column is found. Ignored... dgl_warning("Unamed column is found. Ignored...") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query tests/compute/test_heterograph.py::test_query[idtype0] tests/compute/test_heterograph.py::test_query[idtype1] /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/heterograph.py:2753: DGLWarning: DGLGraph.has_node is deprecated. Please use DGLGraph.has_nodes dgl_warning("DGLGraph.has_node is deprecated. Please use DGLGraph.has_nodes") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/heterograph.py:2687: DGLWarning: DGLGraph.__contains__ is deprecated. Please directly call has_nodes. dgl_warning('DGLGraph.__contains__ is deprecated.' 
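Every one of the NumPy 1.20 alias warnings above has the same mechanical fix: replace the deprecated module-level alias with the builtin, or with an explicit-width dtype when the precision matters. A sketch mirroring the flagged lines (the array sizes here are placeholders):

    import numpy as np

    train_mask = np.zeros(8, dtype=bool)       # was dtype=np.bool
    node_ids = np.arange(8, dtype=np.int64)    # was dtype=np.int
    graph_ids = np.arange(8).astype(np.float64)  # was .astype(np.float)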
tests/compute/test_graph.py::test_query tests/compute/test_sampling.py::test_non_uniform_random_walk[False] tests/compute/test_sampling.py::test_uniform_random_walk[False] tests/compute/test_sampling.py::test_node2vec tests/compute/test_transform.py::test_no_backtracking /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/heterograph.py:2851: DGLWarning: DGLGraph.has_edge_between is deprecated. Please use DGLGraph.has_edges_between dgl_warning("DGLGraph.has_edge_between is deprecated. " tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/heterograph.py:3432: DGLWarning: DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees dgl_warning("DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/heterograph.py:3516: DGLWarning: DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees dgl_warning("DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees") tests/compute/test_graph.py::test_query /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly', 'sort_csr'] are deprecated in v0.5, and can be safely removed in all cases. ' removed in all cases.'.format(list(deprecate_kwargs.keys()))) tests/compute/test_heterograph.py: 20 warnings /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_heterograph.py:1128: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead assert np.asscalar(F.asnumpy(src_i)) == nid[src[i]] tests/compute/test_heterograph.py: 20 warnings /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_heterograph.py:1129: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead assert np.asscalar(F.asnumpy(dst_i)) == nid[dst[i]] tests/compute/test_heterograph.py::test_invertible_conversion[idtype0] tests/compute/test_heterograph.py::test_invertible_conversion[idtype1] tests/compute/test_shared_mem.py::test_single_process[idtype0] tests/compute/test_shared_mem.py::test_single_process[idtype1] /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/heterograph.py:2635: DGLWarning: DGLGraph.is_readonly is deprecated in v0.5. DGLGraph now always supports mutable operations like add_nodes and add_edges. dgl_warning('DGLGraph.is_readonly is deprecated in v0.5.\n' tests/compute/test_heterograph.py::test_types_in_function[idtype0] tests/compute/test_heterograph.py::test_types_in_function[idtype1] /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/backend/pytorch/tensor.py:277: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor). mask = th.tensor(mask, dtype=th.bool) tests/compute/test_pickle.py::test_pickling_batched_heterograph /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/batch.py:511: DGLWarning: From v0.5, DGLHeteroGraph is merged into DGLGraph. You can safely replace dgl.batch_hetero with dgl.batch dgl_warning('From v0.5, DGLHeteroGraph is merged into DGLGraph. 
You can safely' tests/compute/test_sampler.py::test_create_full tests/compute/test_sampler.py::test_1neighbor_sampler_all tests/compute/test_sampler.py::test_1neighbor_sampler tests/compute/test_sampler.py::test_prefetch_neighbor_sampler tests/compute/test_sampler.py::test_10neighbor_sampler_all tests/compute/test_sampler.py::test_10neighbor_sampler tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler tests/compute/test_sampler.py::test_setseed /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/contrib/sampling/sampler.py:317: DGLWarning: dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5. Please read our guide for how to use the new sampling APIs. dgl_warning('dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5.' tests/compute/test_sampler.py::test_create_full tests/compute/test_sampler.py::test_1neighbor_sampler_all tests/compute/test_sampler.py::test_1neighbor_sampler tests/compute/test_sampler.py::test_prefetch_neighbor_sampler tests/compute/test_sampler.py::test_10neighbor_sampler_all tests/compute/test_sampler.py::test_10neighbor_sampler tests/compute/test_sampler.py::test_layer_sampler tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler tests/compute/test_sampler.py::test_setseed /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/_deprecate/nodeflow.py:99: DGLWarning: NodeFlow APIs are deprecated starting from v0.5. Please read our guide for how to use the new sampling APIs. dgl_warning('NodeFlow APIs are deprecated starting from v0.5. Please read our' tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64] /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/sampling/negative.py:102: ComplexWarning: Casting complex values to real discards the imaginary part g._graph, etype_id, num_samples, 3, exclude_self_loops, replace, redundancy) tests/compute/test_serialize.py::test_graph_serialize_with_feature[False] tests/compute/test_serialize.py::test_graph_serialize_without_feature[False] tests/compute/test_serialize.py::test_graph_serialize_with_labels[False] tests/compute/test_transform.py::test_simple_graph /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly'] are deprecated in v0.5, and can be safely removed in all cases. ' removed in all cases.'.format(list(deprecate_kwargs.keys()))) tests/compute/test_serialize.py::test_load_old_files1 tests/compute/test_serialize.py::test_load_old_files2 /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/data/graph_serialize.py:179: DGLWarning: You are loading a graph file saved by old version of dgl. Please consider saving it again with the current format. Please consider saving it again with the current format.") tests/compute/test_sparse.py: 40 warnings /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_sparse.py:299: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(torch.version.cuda) < LooseVersion("11.0") \ tests/compute/test_sparse.py: 20 warnings /root/jenkins/workspace/dgl_PR-4648@3/tests/compute/test_sparse.py:339: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. 
- generated xml file: /root/jenkins/workspace/dgl_PR-4648@3/pytest_compute.xml -
============================ slowest 100 durations =============================
148.52s call tests/compute/test_data.py::test_reddit
104.10s call tests/compute/test_data.py::test_fakenews
26.63s call tests/compute/test_data.py::test_as_graphpred
25.53s call tests/compute/test_data.py::test_as_graphpred_ogb
23.57s call tests/compute/test_data.py::test_gin
22.26s call tests/compute/test_sampler.py::test_negative_sampler
12.38s call tests/compute/test_data.py::test_as_linkpred_ogb
10.04s call tests/compute/test_data.py::test_tudataset_regression
8.16s call tests/compute/test_data.py::test_gnn_benchmark
7.70s call tests/compute/test_data.py::test_as_graphpred_reprocess
7.25s call tests/compute/test_data.py::test_as_nodepred_ogb
7.08s call tests/compute/test_data.py::test_wiki_cs
4.96s call tests/compute/test_data.py::test_fraud
4.18s call tests/compute/test_graph.py::test_query
3.28s call tests/compute/test_data.py::test_citation_graph
3.19s call tests/compute/test_data.py::test_add_nodepred_split
3.06s call tests/compute/test_sampling.py::test_sample_neighbors_outedge
3.00s call tests/compute/test_data.py::test_flickr
2.79s call tests/compute/test_heterograph.py::test_query[idtype1]
2.10s call tests/compute/test_transform.py::test_metis_partition[idtype1]
1.73s call tests/compute/test_heterograph.py::test_query[idtype0]
1.70s call tests/compute/test_sampling.py::test_sample_neighbors_noprob
1.66s call tests/compute/test_sampling.py::test_sample_neighbors_prob
1.39s call tests/compute/test_heterograph.py::test_level2[idtype0]
1.38s call tests/compute/test_data.py::test_explain_syn
1.13s call tests/compute/test_heterograph.py::test_level2[idtype1]
1.09s call tests/compute/test_heterograph.py::test_forking_pickler
0.91s call tests/compute/test_heterograph.py::test_view1[idtype0]
0.90s call tests/compute/test_heterograph.py::test_view1[idtype1]
0.82s call tests/compute/test_basics.py::test_update_routines[idtype0]
0.80s call tests/compute/test_basics.py::test_update_routines[idtype1]
0.66s call tests/compute/test_transform.py::test_reorder_nodes
0.61s call tests/compute/test_sampler.py::test_10neighbor_sampler_all
0.54s call tests/compute/test_data.py::test_csvdataset
0.44s call tests/compute/test_propagate.py::test_prop_edges_dfs[idtype0]
0.43s call tests/compute/test_propagate.py::test_prop_edges_dfs[idtype1]
0.39s call tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype0]
0.39s call tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype1]
0.39s call tests/compute/test_subgraph.py::test_in_subgraph[idtype0]
0.39s call tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype0-idtype1]
0.39s call tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype1]
0.38s call tests/compute/test_transform.py::test_remove_nodes[idtype1]
0.37s call tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype0]
0.36s call tests/compute/test_heterograph.py::test_types_in_function[idtype0]
0.36s call tests/compute/test_subgraph.py::test_out_subgraph[idtype1]
0.35s call tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype0-idtype1]
0.35s call tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype1-idtype0]
0.34s call tests/compute/test_batched_graph.py::test_batch_propagate[idtype0]
0.34s call tests/compute/test_sampling.py::test_sample_neighbors_topk
0.34s call tests/compute/test_batched_graph.py::test_batch_propagate[idtype1]
0.33s call tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype0]
0.33s call tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype1-idtype1]
0.33s call tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype0-idtype0]
0.32s call tests/compute/test_sampling.py::test_uniform_random_walk[False]
0.32s call tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype1]
0.32s call tests/compute/test_csrmm.py::test_csrmm_backward[1-dtype0-idtype0]
0.31s call tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype1-idtype1]
0.30s call tests/compute/test_basics.py::test_pull_0deg[idtype1]
0.29s call tests/compute/test_sampling.py::test_non_uniform_random_walk[False]
0.29s call tests/compute/test_data.py::test_minigc
0.27s call tests/compute/test_specialization.py::test_pull_multi_fallback[idtype0]
0.27s call tests/compute/test_specialization.py::test_pull_multi_fallback[idtype1]
0.27s call tests/compute/test_data.py::test_as_nodepred2
0.26s call tests/compute/test_subgraph.py::test_in_subgraph[idtype1]
0.26s call tests/compute/test_sampling.py::test_sample_neighbors_biased_bipartite
0.25s call tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype0]
0.25s call tests/compute/test_csrmm.py::test_csrmm_backward[2-dtype1-idtype0]
0.24s call tests/compute/test_heterograph.py::test_empty_query[idtype1]
0.23s call tests/compute/test_batched_heterograph.py::test_slice_batch[idtype1]
0.21s call tests/compute/test_batched_heterograph.py::test_slice_batch[idtype0]
0.21s call tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype0]
0.21s call tests/compute/test_basics.py::test_pull_0deg[idtype0]
0.20s call tests/compute/test_basics.py::test_send_multigraph[idtype0]
0.20s call tests/compute/test_subgraph.py::test_out_subgraph[idtype0]
0.20s call tests/compute/test_sampling.py::test_pinsage_sampling[False]
0.20s call tests/compute/test_batched_graph.py::test_batch_send_and_recv[idtype0]
0.19s call tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype1]
0.19s call tests/compute/test_sampling.py::test_sample_neighbors_biased_homogeneous
0.19s call tests/compute/test_transform.py::test_reorder_graph[idtype0]
0.18s call tests/compute/test_batched_graph.py::test_set_batch_info[idtype0]
0.18s call tests/compute/test_batched_graph.py::test_batch_send_and_recv[idtype1]
0.18s call tests/compute/test_batched_graph.py::test_set_batch_info[idtype1]
0.18s call tests/compute/test_heterograph.py::test_format[idtype0]
0.18s call tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype1]
0.17s call tests/compute/test_csrmm.py::test_csrmask_backward[dtype1-idtype0]
0.17s call tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype1]
0.17s call tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype1-idtype1]
0.17s call tests/compute/test_csrmm.py::test_csrmask_backward[dtype0-idtype1]
0.17s call tests/compute/test_csrmm.py::test_csrmask_backward[dtype0-idtype0]
0.16s call tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype1]
0.16s call tests/compute/test_heterograph.py::test_ismultigraph[idtype0]
0.16s call tests/compute/test_basics.py::test_degree_bucket_edge_ordering[idtype1]
0.16s call tests/compute/test_sort.py::test_sort_with_tag[idtype0]
0.16s call tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype0-idtype0]
0.16s call tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype1-idtype0]
0.16s call tests/compute/test_propagate.py::test_prop_nodes_topo[idtype0]
0.15s call tests/compute/test_csrmm.py::test_csrsum_backward[2-dtype0-idtype1]
0.15s call tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype0]
0.15s call tests/compute/test_basics.py::test_update_all_0deg[idtype0]
0.15s call tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype1]
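The "slowest 100 durations" table above is what pytest prints for --durations=100, and nearly every entry in the failure summary below is the same TypeError, truncated by pytest's short summary to fragments like "TypeError: em...". A hedged sketch, not part of the build output, of how one might re-run a single failing parametrization locally to see the full traceback; the node id and -k filter are examples only, and any failing id from the summary works:

    # Local repro sketch: one failing test, full traceback instead of the
    # truncated one-liners in the short summary.
    import pytest

    pytest.main([
        "tests/compute/test_sparse.py::test_spmm",        # example node id
        "-k", "idtype0 and sum and add and shp0 and g0",  # narrow the params
        "--tb=long",  # full traceback
        "-rA",        # detailed summary for all outcomes
    ])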
=========================== short test summary info ============================
FAILED tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[idtype0]
FAILED tests/compute/test_apply_edges_hetero.py::test_unary_copy_u[idtype1]
FAILED tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype0] - Ty...
FAILED tests/compute/test_apply_edges_hetero.py::test_binary_op[idtype1] - Ty...
FAILED tests/compute/test_basics.py::test_issue_1088[idtype0] - TypeError: em...
FAILED tests/compute/test_basics.py::test_issue_1088[idtype1] - TypeError: em...
FAILED tests/compute/test_basics.py::test_issue_2484[idtype0] - TypeError: em...
FAILED tests/compute/test_basics.py::test_issue_2484[idtype1] - TypeError: em...
FAILED tests/compute/test_edge_softmax_hetero.py::test_edge_softmax_unidirectional
FAILED tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype0-src-g0]
FAILED tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype0-dst-g0]
FAILED tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype1-src-g0]
FAILED tests/compute/test_edge_softmax_hetero.py::test_edge_softmax[idtype1-dst-g0]
FAILED tests/compute/test_heterograph.py::test_updates[idtype0] - TypeError: ...
FAILED tests/compute/test_heterograph.py::test_updates[idtype1] - TypeError: ...
FAILED tests/compute/test_heterograph.py::test_backward[idtype0] - TypeError:...
FAILED tests/compute/test_heterograph.py::test_backward[idtype1] - TypeError:...
FAILED tests/compute/test_kernel.py::test_copy_src_reduce - TypeError: empty_...
FAILED tests/compute/test_kernel.py::test_copy_edge_reduce - TypeError: empty...
FAILED tests/compute/test_kernel.py::test_all_binary_builtins - TypeError: em...
FAILED tests/compute/test_kernel.py::test_mean_zero_degree[g0-idtype0] - Type...
FAILED tests/compute/test_kernel.py::test_mean_zero_degree[g0-idtype1] - Type...
FAILED tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype0]
FAILED tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype1]
FAILED tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype0]
FAILED tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype1]
FAILED tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype0]
FAILED tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype1]
FAILED tests/compute/test_readout.py::test_sum_case1[idtype0] - TypeError: em...
FAILED tests/compute/test_readout.py::test_sum_case1[idtype1] - TypeError: em...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g0-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g0-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g1-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g1-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g2-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g2-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g3-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g3-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g4-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g4-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g5-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g5-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g6-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[sum-g6-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g0-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g0-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g1-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g1-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g2-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g2-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g3-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g3-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g4-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g4-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g5-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g5-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g6-idtype0] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[max-g6-idtype1] - T...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g0-idtype0] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g0-idtype1] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g1-idtype0] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g1-idtype1] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g2-idtype0] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g2-idtype1] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g3-idtype0] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g3-idtype1] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g4-idtype0] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g4-idtype1] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g5-idtype0] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g5-idtype1] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g6-idtype0] - ...
FAILED tests/compute/test_readout.py::test_reduce_readout[mean-g6-idtype1] - ...
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype1]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype0]
FAILED tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype1]
FAILED tests/compute/test_readout.py::test_softmax[g0-idtype0] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g0-idtype1] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g1-idtype0] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g1-idtype1] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g2-idtype0] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g2-idtype1] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g3-idtype0] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g3-idtype1] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g4-idtype0] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g4-idtype1] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g5-idtype0] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g5-idtype1] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g6-idtype0] - TypeError: e...
FAILED tests/compute/test_readout.py::test_softmax[g6-idtype1] - TypeError: e...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp4-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp4-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp5-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp5-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp0-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp0-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp1-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp1-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp2-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp2-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp3-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp3-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp4-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp4-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp5-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp5-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp0-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp0-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp1-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp1-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp2-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp2-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp4-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp4-g1] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp5-g0] - Typ... FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp5-g1] - Typ... 
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp0-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp0-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp1-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp1-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp2-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp2-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp3-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp3-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp4-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp4-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp5-g0] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp5-g1] - Typ...
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp0-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp0-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp1-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp1-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp2-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp2-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp3-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp3-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp4-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp4-g1]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp5-g0]
FAILED tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp5-g1]
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp0-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp0-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp1-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp1-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp2-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp2-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp3-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp3-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp4-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp4-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp0-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp0-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp1-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp1-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp2-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp2-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp3-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp3-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp4-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp4-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp0-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp0-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp1-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp1-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp2-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp2-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp3-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp3-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp4-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp4-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp0-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp0-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp1-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp1-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp2-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp2-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp3-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp3-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp4-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp4-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp0-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp0-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp1-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp1-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp2-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp2-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp3-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp3-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp4-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp4-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp0-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp0-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp1-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp1-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp2-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp2-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp3-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp3-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp4-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp4-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp0-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp0-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp1-g0] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp1-g1] - Ty...
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp3-g0] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-u-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-u-shp4-g1] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-mul-e-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp1-g0] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-v-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp2-g1] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp4-g0] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-v-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-v-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-u-e-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp1-g0] FAILED 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-u-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-v-e-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-u-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-v-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp3-g1] FAILED 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp1-g0] FAILED 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp3-g0] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp4-g1] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp1-g0] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp2-g1] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp4-g0] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g1] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp2-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp0-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp0-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp1-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp1-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp2-g0] - Ty... 
FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp2-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp3-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp3-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp4-g0] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp4-g1] - Ty... FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp4-g1] FAILED 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g0] FAILED 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g1] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g0] FAILED tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g1] FAILED tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-src-g0] FAILED tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-dst-g0] FAILED tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-src-g0] FAILED tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-dst-g0] FAILED tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-src-g0] FAILED tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-dst-g0] FAILED tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-src-g0] FAILED tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-dst-g0] FAILED tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-src-g0] FAILED tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-dst-g0] FAILED 
tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-src-g0] FAILED tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-dst-g0] FAILED tests/compute/test_sparse.py::test_segment_reduce[sum] - TypeError: em... FAILED tests/compute/test_sparse.py::test_segment_reduce[max] - TypeError: em... FAILED tests/compute/test_sparse.py::test_segment_reduce[min] - TypeError: em... FAILED tests/compute/test_sparse.py::test_segment_reduce[mean] - TypeError: e... FAILED tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-idtype0] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-idtype1] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-idtype0] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-idtype1] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-idtype0] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-idtype1] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-idtype0] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-idtype1] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-idtype0] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-idtype1] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-idtype0] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-idtype1] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-idtype0] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-idtype1] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-idtype0] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-idtype1] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-idtype0] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-idtype1] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-idtype0] FAILED tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-idtype1] FAILED tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-1] - A... FAILED tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-8] - A... FAILED tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-16] - ... FAILED tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-64] - ... FAILED tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-256] FAILED tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-1] - ... FAILED tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-8] - ... FAILED tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-16] FAILED tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-64] FAILED tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-256] FAILED tests/compute/test_sparse.py::test_use_libxsmm_switch - TypeError: emp... FAILED tests/compute/test_specialization.py::test_v2v_update_all[idtype0] - T... FAILED tests/compute/test_specialization.py::test_v2v_update_all[idtype1] - T... FAILED tests/compute/test_specialization.py::test_v2v_snr[idtype0] - TypeErro... FAILED tests/compute/test_specialization.py::test_v2v_snr[idtype1] - TypeErro... FAILED tests/compute/test_specialization.py::test_v2v_pull[idtype0] - TypeErr... FAILED tests/compute/test_specialization.py::test_v2v_pull[idtype1] - TypeErr... 
FAILED tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype0] FAILED tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype1] FAILED tests/compute/test_specialization.py::test_pull_multi_fallback[idtype0] FAILED tests/compute/test_specialization.py::test_pull_multi_fallback[idtype1] FAILED tests/compute/test_specialization.py::test_spmv_3d_feat[idtype0] - Typ... FAILED tests/compute/test_specialization.py::test_spmv_3d_feat[idtype1] - Typ... FAILED tests/compute/test_transform.py::test_khop_graph - TypeError: empty_co... FAILED tests/compute/test_transform.py::test_khop_adj - TypeError: empty_cont... FAILED tests/compute/test_transform.py::test_to_simple[idtype0] - TypeError: ... FAILED tests/compute/test_transform.py::test_to_simple[idtype1] - TypeError: ... FAILED tests/compute/test_transform.py::test_remove_edges[idtype0] - TypeErro... FAILED tests/compute/test_transform.py::test_remove_edges[idtype1] - TypeErro... FAILED tests/compute/test_transform.py::test_remove_nodes[idtype0] - TypeErro... FAILED tests/compute/test_transform.py::test_remove_nodes[idtype1] - TypeErro... FAILED tests/compute/test_transform.py::test_add_selfloop[idtype0] - TypeErro... FAILED tests/compute/test_transform.py::test_add_selfloop[idtype1] - TypeErro... FAILED tests/compute/test_transform.py::test_remove_selfloop[idtype0] - TypeE... FAILED tests/compute/test_transform.py::test_remove_selfloop[idtype1] - TypeE... FAILED tests/compute/test_transform.py::test_module_gcnnorm[idtype0] - TypeEr... FAILED tests/compute/test_transform.py::test_module_gcnnorm[idtype1] - TypeEr... FAILED tests/compute/test_transform.py::test_module_sign[g0] - TypeError: emp... ==== 1474 failed, 911 passed, 73 skipped, 322 warnings in 566.50s (0:09:26) ==== Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... Server (2) shutdown. Server is exiting... FAIL: compute PASSED [ 38%] tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-True-False-4-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. torch.int64 Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:33:51] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.002 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.003s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.003 seconds Splitting the graph into partitions takes 0.007s, peak mem: 1.529 GB part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.018 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. 
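
The compute suite above ends at 1474 failed / 911 passed, and every failure that shows a reason shows the same truncated TypeError raised before any numerical check runs, which lines up with the commit under test, "fix for pytorch < 1.12". The failing IDs enumerate idtype, operator, lhs/rhs targets (u, v, e), feature shape, and graph fixture for DGL's generalized SDDMM, edge-softmax, and segment kernels. A minimal sketch of what those parametrized cases call, assuming the public dgl.ops surface is what the IDs point at; the toy graph and feature shapes are illustrative, not the suite's fixtures:

    import torch
    import dgl

    g = dgl.graph(([0, 1, 2], [1, 2, 0]))      # stand-in for the g0/g1 fixtures
    u = torch.randn(g.num_nodes(), 4)
    v = torch.randn(g.num_nodes(), 4)

    # test_sddmm[...-dot-u-v-...]: generalized SDDMM, one value per edge
    e = dgl.ops.gsddmm(g, 'dot', u, v, lhs_target='u', rhs_target='v')

    # test_edge_softmax[...]: normalize edge logits over each node's edges
    a = dgl.ops.edge_softmax(g, e)

    # test_segment_reduce[sum|max|min|mean]: reduce over variable-length segments
    seglen = torch.tensor([2, 1])
    out = dgl.ops.segment_reduce(seglen, torch.randn(3, 4), reducer='sum')

    # test_segment_mm: per-segment matmul; seglen_a must sum to a_mat's rows
    a_mat = torch.randn(10, 8)
    b_mat = torch.randn(2, 8, 6)
    res = dgl.ops.segment_mm(a_mat, b_mat, seglen_a=torch.tensor([6, 4]))

The test_transform failures (khop_graph, to_simple, add_selfloop, and others) show the same truncated empty_... TypeError, so one backend-level fix plausibly clears the whole batch.
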
[Pipeline] } [Pipeline] // timeout [Pipeline] } [Pipeline] // stage [Pipeline] stage [Pipeline] { (Torch CPU Example test) Stage "Torch CPU Example test" skipped due to earlier failure(s) [Pipeline] } [Pipeline] // stage [Pipeline] stage [Pipeline] { (Torch CPU Tutorial test) Stage "Torch CPU Tutorial test" skipped due to earlier failure(s) [Pipeline] } [Pipeline] // stage Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration... server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:33:52] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:52] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:15094]... server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:33:53] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:53] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:15097]... [WS-CLEANUP] done [Pipeline] } $ docker stop --time=1 af5aa5eeb7433ab669abbd955ede40baf0b228908472383bbed72d33561f7092 server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:33:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:15100]... [05:33:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. $ docker rm -f af5aa5eeb7433ab669abbd955ede40baf0b228908472383bbed72d33561f7092 [Pipeline] // withDockerContainer [Pipeline] } [Pipeline] // withEnv [Pipeline] } Running on dgl-manual-large-cpu in /root/jenkins/workspace/dgl_PR-4648@3 [Pipeline] // node [Pipeline] { [Pipeline] } [Pipeline] // stage [Pipeline] } Failed in branch Torch CPU [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Cloning the remote Git repository Cloning with configured refspecs honoured and without tags [05:33:56] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:56] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:33:56] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. 
[05:33:56] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:33:56] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:56] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:33:56] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:33:56] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Cloning repository https://github.com/dmlc/dgl.git > git init /root/jenkins/workspace/dgl_PR-4648@3 # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 Cleaning workspace Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Client [1890] waits on 172.17.0.3:60767 Client [1892] waits on 172.17.0.3:58815 Client [1889] waits on 172.17.0.3:33889 Client [1885] waits on 172.17.0.3:59449 Client [1891] waits on 172.17.0.3:42885 Machine (0) group (0) client (0) connect to server successfuly! Machine (0) group (0) client (4) connect to server successfuly! Machine (0) group (0) client (2) connect to server successfuly! Machine (0) group (0) client (1) connect to server successfuly! Machine (0) group (0) client (3) connect to server successfuly! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Commit message: "fix for pytorch < 1.12" Cleaning workspace NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Client[3] in group[0] is exiting... Client[0] in group[0] is exiting... > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git config --add remote.origin.fetch +refs/pull/4648/head:refs/remotes/origin/PR-4648 # timeout=10 > git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 No valid HEAD. 
Skipping the resetting > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 [Pipeline] withEnv [Pipeline] { [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh Client[4] in group[0] is exiting... + docker pull dgllib/dgl-ci-cpu:v220816 Client[1] in group[0] is exiting... Client[2] in group[0] is exiting... v220816: Pulling from dgllib/dgl-ci-cpu Digest: sha256:64b385c33b44dc57cb96ff264a84d8dfb8ced0caa9b30fbc4cec6d5ee511b099 Status: Image is up to date for dgllib/dgl-ci-cpu:v220816 docker.io/dgllib/dgl-ci-cpu:v220816 [Pipeline] } [Pipeline] // withEnv [Pipeline] isUnix [Pipeline] withEnv [Pipeline] { [Pipeline] sh Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... + docker inspect -f . dgllib/dgl-ci-cpu:v220816 . [Pipeline] } [Pipeline] // withEnv [Pipeline] withDockerContainer dgl-manual-large-cpu does not seem to be running inside a container $ docker run -t -d -u 0:0 -w /root/jenkins/workspace/dgl_PR-4648@3 -v /root/jenkins/workspace/dgl_PR-4648@3:/root/jenkins/workspace/dgl_PR-4648@3:rw,z -v /root/jenkins/workspace/dgl_PR-4648@3@tmp:/root/jenkins/workspace/dgl_PR-4648@3@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-cpu:v220816 cat $ docker top 39027e0569c91f60d197e31fb8f36454bc8a908bd28bdc225aa7c1adb21f2213 -eo pid,comm [Pipeline] { [Pipeline] stage [Pipeline] { (DGL-Go CPU test) [Pipeline] sh Server (2) shutdown. Server is exiting... 
+ rm -rf CMakeLists.txt CONTRIBUTORS.md Jenkinsfile LICENSE NEWS.md README.md apps benchmarks cmake conda dglgo docker docs examples featgraph include pyproject.toml python readthedocs.yml src tensoradapter tests third_party tools tutorials [Pipeline] checkout The recommended git tool is: git using credential 150de63f-189c-4717-bcaf-010460d2f51a Warning: JENKINS-30600: special launcher org.jenkinsci.plugins.docker.workflow.WithContainerStep$Decorator$1@3255d550; decorates RemoteLauncher[hudson.remoting.Channel@1d9b9638:dgl-manual-large-cpu] will be ignored (a typical symptom is the Git executable not being run inside a designated container) Fetching changes from the remote Git repository Cleaning workspace Fetching without tags Merging remotes/origin/master commit d78a3a4baf611b90871a849f58647160c7cd9ab4 into PR head commit 37ed78035f406940ed88a4b60cadd2b7cfde5fea Merge succeeded, producing 37ed78035f406940ed88a4b60cadd2b7cfde5fea Checking out Revision 37ed78035f406940ed88a4b60cadd2b7cfde5fea (PR-4648) Commit message: "fix for pytorch < 1.12" Cleaning workspace > git rev-parse --resolve-git-dir /root/jenkins/workspace/dgl_PR-4648@3/.git # timeout=10 > git config remote.origin.url https://github.com/dmlc/dgl.git # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 > git clean -fdx # timeout=10 Fetching upstream changes from https://github.com/dmlc/dgl.git > git --version # timeout=10 > git --version # 'git version 2.17.1' using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git fetch --no-tags --progress -- https://github.com/dmlc/dgl.git +refs/pull/4648/head:refs/remotes/origin/PR-4648 +refs/heads/master:refs/remotes/origin/master # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git remote # timeout=10 > git config --get remote.origin.url # timeout=10 using GIT_ASKPASS to set credentials dgl-bot-personal-access-key220906 > git merge d78a3a4baf611b90871a849f58647160c7cd9ab4 # timeout=10 > git rev-parse HEAD^{commit} # timeout=10 > git config core.sparsecheckout # timeout=10 > git checkout -f 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git branch -a -v --no-abbrev # timeout=10 > git branch -D PR-4648 # timeout=10 > git checkout -b PR-4648 37ed78035f406940ed88a4b60cadd2b7cfde5fea # timeout=10 > git rev-parse --verify HEAD # timeout=10 Resetting working tree > git reset --hard # timeout=10 PASSED [ 40%] tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-True-0-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. 
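
Each test_dist_dataloader case first loads the dataset (the NumNodes: 2708 / NumEdges: 10556 stats match the Cora citation graph) and then re-partitions it; the repeated "Partition a graph with 2708 nodes and 10556 edges into 3 parts" blocks are emitted by DGL's METIS-backed partitioner. A sketch of the call behind those blocks, with an illustrative out_path; graph_name mirrors the "load test_sampling" entries:

    import dgl
    from dgl.data import CoraGraphDataset

    g = CoraGraphDataset()[0]            # NumNodes: 2708, NumEdges: 10556
    dgl.distributed.partition_graph(
        g,
        graph_name='test_sampling',
        num_parts=3,
        out_path='/tmp/dist_graph',      # illustrative location
        part_method='metis')

partition_graph writes one folder per part plus a test_sampling.json metadata file under out_path, which is what the servers and clients later load.
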
torch.int64 Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:34:05] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.002 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.003s, peak mem: 1.529 GB Split the graph: 0.002 seconds Construct subgraphs: 0.001 seconds Splitting the graph into partitions takes 0.003s, peak mem: 1.529 GB Calculate edge assignment: 0.000 seconds part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.015 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. [Pipeline] sh + git submodule update --recursive --init > git clean -fdx # timeout=10 Submodule 'third_party/METIS' (https://github.com/KarypisLab/METIS.git) registered for path 'third_party/METIS' Submodule 'third_party/dlpack' (https://github.com/dmlc/dlpack.git) registered for path 'third_party/dlpack' Submodule 'third_party/dmlc-core' (https://github.com/dmlc/dmlc-core.git) registered for path 'third_party/dmlc-core' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/googletest' Submodule 'third_party/libxsmm' (https://github.com/hfp/libxsmm.git) registered for path 'third_party/libxsmm' Submodule 'third_party/nanoflann' (https://github.com/jlblancoc/nanoflann) registered for path 'third_party/nanoflann' Submodule 'third_party/nccl' (https://github.com/nvidia/nccl) registered for path 'third_party/nccl' Submodule 'third_party/phmap' (https://github.com/greg7mdp/parallel-hashmap.git) registered for path 'third_party/phmap' Submodule 'third_party/tensorpipe' (https://github.com/pytorch/tensorpipe) registered for path 'third_party/tensorpipe' Submodule 'third_party/thrust' (https://github.com/NVIDIA/thrust.git) registered for path 'third_party/thrust' Submodule 'third_party/tvm' (https://github.com/apache/incubator-tvm) registered for path 'third_party/tvm' Submodule 'third_party/xbyak' (https://github.com/herumi/xbyak) registered for path 'third_party/xbyak' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/METIS'... server: #clients=1 load test_sampling start graph service on server 0 for part 0 [05:34:05] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:05] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:17055]... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/dlpack'... server: #clients=1 load test_sampling start graph service on server 1 for part 1 [05:34:07] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:07] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:17058]... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/dmlc-core'... 
Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/googletest'... server: #clients=1 load test_sampling start graph service on server 2 for part 2 [05:34:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:17061]... [05:34:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/libxsmm'... Client [1991] waits on 172.17.0.3:52371 Machine (0) group (0) client (0) connect to server successfuly! NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Client[0] in group[0] is exiting... Server (2) shutdown. Server is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... PASSED [ 41%] tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-True-4-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. torch.int64 Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:34:13] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.003 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.529 GB Split the graph: 0.003 seconds Construct subgraphs: 0.002 seconds Splitting the graph into partitions takes 0.004s, peak mem: 1.529 GB Calculate edge assignment: 0.000 seconds part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.019 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. server: #clients=5 load test_sampling start graph service on server 0 for part 0 [05:34:14] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:14] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21029]... server: #clients=5 load test_sampling start graph service on server 1 for part 1 [05:34:15] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:15] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21032]... 
server: #clients=5 load test_sampling start graph service on server 2 for part 2 [05:34:16] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:16] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21035]... [05:34:17] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:17] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:34:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:34:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:34:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:34:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Client [2026] waits on 172.17.0.3:42933 Client [2025] waits on 172.17.0.3:58535 Client [2027] waits on 172.17.0.3:56369 Client [2028] waits on 172.17.0.3:45837 Client [2021] waits on 172.17.0.3:35301 Machine (0) group (0) client (1) connect to server successfuly! Machine (0) group (0) client (3) connect to server successfuly! Machine (0) group (0) client (0) connect to server successfuly! Machine (0) group (0) client (2) connect to server successfuly! Machine (0) group (0) client (4) connect to server successfuly! NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Client[0] in group[0] is exiting... Client[4] in group[0] is exiting... Client[1] in group[0] is exiting... Client[3] in group[0] is exiting... Client[2] in group[0] is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/nanoflann'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/nccl'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/phmap'... Server (2) shutdown. Server is exiting... PASSED [ 43%] tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-False-0-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files.
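
The "Client [pid] waits on ip:port" / "connect to server successfuly!" pairs are the trainer side of DGL's distributed RPC handshake: each client process brings up the RPC layer, registers with every server, and then attaches to the partitioned graph being served. A minimal sketch of that client side, assuming an ip_config.txt listing the servers (the file path is illustrative; the graph name and part config mirror the log and the partition sketch above):

    import dgl

    dgl.distributed.initialize('ip_config.txt')   # one "ip port" line per server
    g = dgl.distributed.DistGraph(
        'test_sampling',
        part_config='/tmp/dist_graph/test_sampling.json')
    print(g.num_nodes())    # aggregated over all three partitions
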
torch.int64 Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:34:25] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.002 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.003s, peak mem: 1.529 GB Split the graph: 0.002 seconds Construct subgraphs: 0.001 seconds Splitting the graph into partitions takes 0.004s, peak mem: 1.529 GB Calculate edge assignment: 0.000 seconds part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.017 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe'... server: #clients=1 load test_sampling start graph service on server 0 for part 0 [05:34:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13349]... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/thrust'... server: #clients=1 load test_sampling start graph service on server 1 for part 1 [05:34:27] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:27] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13352]... server: #clients=1 load test_sampling start graph service on server 2 for part 2 [05:34:28] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:28] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13355]... [05:34:29] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:29] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm'... Client [2127] waits on 172.17.0.3:47999 Machine (0) group (0) client (0) connect to server successfuly! NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... Server (2) shutdown. Server is exiting... PASSED [ 45%] tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-False-4-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. 
torch.int64 Converting to homogeneous graph takes 0.001s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:34:34] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.002 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.003s, peak mem: 1.529 GB Split the graph: 0.002 seconds Construct subgraphs: 0.002 seconds Splitting the graph into partitions takes 0.004s, peak mem: 1.529 GB Calculate edge assignment: 0.000 seconds part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.017 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/xbyak'... Submodule path 'third_party/METIS': checked out '10603482cc055626ff99d2ac3ab7e73d1119439d' Submodule 'GKlib' (https://github.com/KarypisLab/GKlib.git) registered for path 'third_party/METIS/GKlib' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/METIS/GKlib'... Submodule path 'third_party/METIS/GKlib': checked out '62de20c7f97c719abdc96d7c3e266a1f7cb52bc5' Submodule path 'third_party/dlpack': checked out 'e2bdd3bee8cb6501558042633fa59144cc8b7f5f' Submodule path 'third_party/dmlc-core': checked out 'bfad207b448480783a1f428ae3d93d87032d8349' server: #clients=5 load test_sampling start graph service on server 0 for part 0 [05:34:35] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:35] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13039]... Submodule path 'third_party/googletest': checked out 'f71fb4f9a912ec945401cc49a287a759b6131026' server: #clients=5 load test_sampling start graph service on server 1 for part 1 [05:34:36] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:36] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13042]... 
Submodule path 'third_party/libxsmm': checked out 'fa687556130b6298430f1c0555a14cf79ab6101c' Submodule path 'third_party/nanoflann': checked out '4c47ca200209550c5628c89803591f8a753c8181' Submodule path 'third_party/nccl': checked out 'e11238b3029795d33f958b5868d47c90c4f22628' Submodule path 'third_party/phmap': checked out '25293cefd8b85491b45600c03fe8edf07647553f' Submodule path 'third_party/tensorpipe': checked out '6042f1a4cbce8eef997f11ed0012de137b317361' Submodule 'third_party/googletest' (https://github.com/google/googletest.git) registered for path 'third_party/tensorpipe/third_party/googletest' Submodule 'third_party/libnop' (https://github.com/google/libnop.git) registered for path 'third_party/tensorpipe/third_party/libnop' Submodule 'third_party/libuv' (https://github.com/libuv/libuv.git) registered for path 'third_party/tensorpipe/third_party/libuv' Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/tensorpipe/third_party/pybind11' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/googletest'... server: #clients=5 load test_sampling start graph service on server 2 for part 2 [05:34:37] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:37] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13045]... [05:34:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/libnop'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/libuv'... [05:34:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:34:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:34:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:34:40] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:40] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/pybind11'... 
Submodule path 'third_party/tensorpipe/third_party/googletest': checked out 'aee0f9d9b5b87796ee8a0ab26b7587ec30e8858e' Submodule path 'third_party/tensorpipe/third_party/libnop': checked out 'aa95422ea8c409e3f078d2ee7708a5f59a8b9fa2' Submodule path 'third_party/tensorpipe/third_party/libuv': checked out '1dff88e5161cba5c59276d2070d2e304e4dcb242' Submodule path 'third_party/tensorpipe/third_party/pybind11': checked out 'a23996fce38ff6ccfbcdc09f1e63f2c4be5ea2ef' Submodule 'tools/clang' (https://github.com/wjakob/clang-cindex-python3) registered for path 'third_party/tensorpipe/third_party/pybind11/tools/clang' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tensorpipe/third_party/pybind11/tools/clang'... Client [2163] waits on 172.17.0.3:37605 Client [2165] waits on 172.17.0.3:39021 Client [2166] waits on 172.17.0.3:54221 Client [2157] waits on 172.17.0.3:55859 Client [2164] waits on 172.17.0.3:57233 Machine (0) group (0) client (1) connect to server successfuly! Machine (0) group (0) client (0) connect to server successfuly! Machine (0) group (0) client (2) connect to server successfuly! Machine (0) group (0) client (4) connect to server successfuly! Machine (0) group (0) client (3) connect to server successfuly! NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Submodule path 'third_party/tensorpipe/third_party/pybind11/tools/clang': checked out '6a00cbc4a9b8e68b71caf7f774b3f9c753ae84d5' Client[3] in group[0] is exiting... Client[0] in group[0] is exiting... Submodule path 'third_party/thrust': checked out '6a3078c64cab0e2f276340fa5dcafa0d758ed890' Submodule 'cub' (https://github.com/NVIDIA/cub.git) registered for path 'third_party/thrust/dependencies/cub' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/thrust/dependencies/cub'... Client[4] in group[0] is exiting... Client[1] in group[0] is exiting... Client[2] in group[0] is exiting... Server (0) shutdown. Server is exiting... Submodule path 'third_party/thrust/dependencies/cub': checked out 'cdaa9558a85e45d849016e5fe7b6e4ee79113f95' Submodule path 'third_party/tvm': checked out 'b2e418cb109df4cd1f17a2cf2894a1b396a6b838' Submodule 'dlpack' (https://github.com/dmlc/dlpack) registered for path 'third_party/tvm/3rdparty/dlpack' Submodule 'dmlc-core' (https://github.com/dmlc/dmlc-core) registered for path 'third_party/tvm/3rdparty/dmlc-core' Submodule '3rdparty/rang' (https://github.com/agauniyal/rang) registered for path 'third_party/tvm/3rdparty/rang' Submodule '3rdparty/vta-hw' (https://github.com/apache/incubator-tvm-vta) registered for path 'third_party/tvm/3rdparty/vta-hw' Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/dlpack'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/dmlc-core'... Server (1) shutdown. Server is exiting... Server (2) shutdown. Server is exiting... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/rang'... Cloning into '/root/jenkins/workspace/dgl_PR-4648@3/third_party/tvm/3rdparty/vta-hw'...
PASSED [ 47%] tests/distributed/test_mp_dataloader.py::test_dataloader[node-0-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Converting to homogeneous graph takes 0.002s, peak mem: 1.529 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.529 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.529 GB [05:34:48] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.003 seconds, peak memory: 1.529 GB Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.529 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.003 seconds Construct subgraphs: 0.005 seconds Splitting the graph into partitions takes 0.009s, peak mem: 1.529 GB part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.029 seconds, peak memory: 1.529 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. Submodule path 'third_party/tvm/3rdparty/dlpack': checked out '3ec04430e89a6834e5a1b99471f415fa939bf642' Submodule path 'third_party/tvm/3rdparty/dmlc-core': checked out '6c401e242c59a1f4c913918246591bb13fd714e7' Submodule path 'third_party/tvm/3rdparty/rang': checked out 'cabe04d6d6b05356fa8f9741704924788f0dd762' Submodule path 'third_party/tvm/3rdparty/vta-hw': checked out '87ce9acfae550d1a487746e9d06c2e250076e54c' Submodule path 'third_party/xbyak': checked out '757e4063f6464740b8ff4a2cae9136d2f8458020' [Pipeline] unstash [Pipeline] echo Unpacked build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-*-x86_64-linux-gnu.so, build/tensoradapter/pytorch/*.so from dgl-cpu-linux [Pipeline] timeout Timeout set to expire in 20 min [Pipeline] { [Pipeline] sh + bash tests/scripts/task_go_test.sh ~/jenkins/workspace/dgl_PR-4648@3/dglgo ~/jenkins/workspace/dgl_PR-4648@3 server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:34:49] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:49] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:22449]... WARNING: Skipping dglgo as it is not installed. WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv PASSED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-int32] PASSED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-int64] PASSED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[int32] PASSED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[int64] PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_id SKIPPED (NCCL only runs on ...) 
[ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_remainder SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_remainder SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_range SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_range SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_support SKIPPED (NCCL only run...) [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[int32] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[int64] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[int32] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[int64] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[int32] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[int64] SKIPPED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[int32] SKIPPED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[int64] SKIPPED [ 11%] tests/compute/test_pickle.py::test_pickling_index PASSED [ 11%] tests/compute/test_pickle.py::test_pickling_graph_index PASSED [ 11%] tests/compute/test_pickle.py::test_pickling_graph[g0-int32] PASSED [ 11%] tests/compute/test_pickle.py::test_pickling_graph[g0-int64] PASSED [ 11%] tests/compute/test_pickle.py::test_pickling_graph[g1-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g1-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g5-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g5-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g6-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g6-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g11-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g11-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g12-int32] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g12-int64] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_batched_heterograph PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_subgraph PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_is_pinned[int32] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_is_pinned[int64] SKIPPED [ 13%] tests/compute/test_pin_memory.py::test_pin_unpin SKIPPED (Need gpu f...) 
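
The test_pickle block above asserts that every graph fixture (g0-g12, both idtypes) survives a pickle round trip with its index, idtype, and features intact; in miniature, on a toy graph with an illustrative feature name:

    import pickle

    import torch
    import dgl

    g = dgl.graph(([0, 1], [1, 2]), idtype=torch.int32)
    g.ndata['x'] = torch.randn(3, 2)

    g2 = pickle.loads(pickle.dumps(g))
    assert g2.idtype == g.idtype
    assert g2.num_edges() == g.num_edges()
    assert torch.equal(g2.ndata['x'], g.ndata['x'])
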
[ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[int32] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[int64] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[int32] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[int64] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[int32] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[int64] PASSED [ 13%] tests/compute/test_random.py::test_random_choice PASSED [ 13%] tests/compute/test_readout.py::test_sum_case1[int32] PASSED [ 13%] tests/compute/test_readout.py::test_sum_case1[int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-int64] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-int32] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g2-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g2-int64] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g3-int32] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g3-int64] PASSED [ 15%] 
tests/compute/test_readout.py::test_reduce_readout[mean-g4-int32] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g4-int64] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-int32] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-int64] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-int32] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-int32] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-int64] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-int32] 
PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-int64] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-int32] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-int64] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:34:50] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:50] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:22452]... PASSED [ 16%] tests/compute/test_readout.py::test_topk[True-g0-int32] PASSED [ 16%] tests/compute/test_readout.py::test_topk[True-g0-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g1-int32] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g1-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g2-int32] running install PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g2-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g3-int32] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g3-int64] running bdist_egg running egg_info creating dglgo.egg-info PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g4-int32] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g4-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g5-int32] writing dglgo.egg-info/PKG-INFO writing dependency_links to dglgo.egg-info/dependency_links.txt server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:34:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:22455]... 
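The interleaved "start graph service on server N for part N" and "Server is waiting for connections" messages come from the distributed test harness, which launches one graph server per partition. A minimal sketch of the client side under the usual dgl.distributed entry points; the ip_config path is a placeholder, and only the graph name 'test_sampling' is taken from the log:

    import dgl

    # Connect to the already-running graph servers listed in the config,
    # then open the partitioned graph by name.
    dgl.distributed.initialize('ip_config.txt')
    g = dgl.distributed.DistGraph('test_sampling')
    print(g.num_nodes())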
PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g5-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g6-int32] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g6-int64] writing entry points to dglgo.egg-info/entry_points.txt writing requirements to dglgo.egg-info/requires.txt PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g0-int32] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g0-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g1-int32] writing top-level names to dglgo.egg-info/top_level.txt writing manifest file 'dglgo.egg-info/SOURCES.txt' reading manifest file 'dglgo.egg-info/SOURCES.txt' writing manifest file 'dglgo.egg-info/SOURCES.txt' installing library code to build/bdist.linux-x86_64/egg running install_lib running build_py creating build creating build/lib creating build/lib/recipes copying recipes/__init__.py -> build/lib/recipes creating build/lib/dglgo copying dglgo/__init__.py -> build/lib/dglgo creating build/lib/dglgo/pipeline copying dglgo/pipeline/__init__.py -> build/lib/dglgo/pipeline creating build/lib/dglgo/utils copying dglgo/utils/early_stop.py -> build/lib/dglgo/utils copying dglgo/utils/optimizer_config.py -> build/lib/dglgo/utils copying dglgo/utils/enter_config.py -> build/lib/dglgo/utils copying dglgo/utils/yaml_dump.py -> build/lib/dglgo/utils copying dglgo/utils/__init__.py -> build/lib/dglgo/utils copying dglgo/utils/factory.py -> build/lib/dglgo/utils copying dglgo/utils/base_model.py -> build/lib/dglgo/utils creating build/lib/dglgo/apply_pipeline copying dglgo/apply_pipeline/__init__.py -> build/lib/dglgo/apply_pipeline creating build/lib/dglgo/model copying dglgo/model/__init__.py -> build/lib/dglgo/model creating build/lib/dglgo/cli copying dglgo/cli/config_apply_cli.py -> build/lib/dglgo/cli copying dglgo/cli/config_cli.py -> build/lib/dglgo/cli copying dglgo/cli/train_cli.py -> build/lib/dglgo/cli copying dglgo/cli/cli.py -> build/lib/dglgo/cli copying dglgo/cli/export_cli.py -> build/lib/dglgo/cli copying dglgo/cli/recipe_cli.py -> build/lib/dglgo/cli copying dglgo/cli/__init__.py -> build/lib/dglgo/cli copying dglgo/cli/apply_cli.py -> build/lib/dglgo/cli creating build/lib/dglgo/pipeline/nodepred_sample copying dglgo/pipeline/nodepred_sample/__init__.py -> build/lib/dglgo/pipeline/nodepred_sample copying dglgo/pipeline/nodepred_sample/gen.py -> build/lib/dglgo/pipeline/nodepred_sample creating build/lib/dglgo/pipeline/graphpred copying dglgo/pipeline/graphpred/__init__.py -> build/lib/dglgo/pipeline/graphpred copying dglgo/pipeline/graphpred/gen.py -> build/lib/dglgo/pipeline/graphpred creating build/lib/dglgo/pipeline/linkpred copying dglgo/pipeline/linkpred/__init__.py -> build/lib/dglgo/pipeline/linkpred copying dglgo/pipeline/linkpred/gen.py -> build/lib/dglgo/pipeline/linkpred creating build/lib/dglgo/pipeline/nodepred copying dglgo/pipeline/nodepred/__init__.py -> build/lib/dglgo/pipeline/nodepred copying dglgo/pipeline/nodepred/gen.py -> build/lib/dglgo/pipeline/nodepred creating build/lib/dglgo/apply_pipeline/nodepred_sample copying dglgo/apply_pipeline/nodepred_sample/__init__.py -> build/lib/dglgo/apply_pipeline/nodepred_sample copying dglgo/apply_pipeline/nodepred_sample/gen.py -> build/lib/dglgo/apply_pipeline/nodepred_sample creating build/lib/dglgo/apply_pipeline/graphpred copying dglgo/apply_pipeline/graphpred/__init__.py -> build/lib/dglgo/apply_pipeline/graphpred copying dglgo/apply_pipeline/graphpred/gen.py -> 
build/lib/dglgo/apply_pipeline/graphpred creating build/lib/dglgo/apply_pipeline/nodepred copying dglgo/apply_pipeline/nodepred/__init__.py -> build/lib/dglgo/apply_pipeline/nodepred copying dglgo/apply_pipeline/nodepred/gen.py -> build/lib/dglgo/apply_pipeline/nodepred creating build/lib/dglgo/model/node_encoder copying dglgo/model/node_encoder/sage.py -> build/lib/dglgo/model/node_encoder copying dglgo/model/node_encoder/sgc.py -> build/lib/dglgo/model/node_encoder copying dglgo/model/node_encoder/gcn.py -> build/lib/dglgo/model/node_encoder copying dglgo/model/node_encoder/gin.py -> build/lib/dglgo/model/node_encoder copying dglgo/model/node_encoder/__init__.py -> build/lib/dglgo/model/node_encoder copying dglgo/model/node_encoder/gat.py -> build/lib/dglgo/model/node_encoder creating build/lib/dglgo/model/graph_encoder copying dglgo/model/graph_encoder/gin_ogbg.py -> build/lib/dglgo/model/graph_encoder copying dglgo/model/graph_encoder/__init__.py -> build/lib/dglgo/model/graph_encoder copying dglgo/model/graph_encoder/pna.py -> build/lib/dglgo/model/graph_encoder creating build/lib/dglgo/model/edge_encoder copying dglgo/model/edge_encoder/bilinear.py -> build/lib/dglgo/model/edge_encoder copying dglgo/model/edge_encoder/dot.py -> build/lib/dglgo/model/edge_encoder copying dglgo/model/edge_encoder/ele.py -> build/lib/dglgo/model/edge_encoder copying dglgo/model/edge_encoder/__init__.py -> build/lib/dglgo/model/edge_encoder copying recipes/graphpred_hiv_gin.yaml -> build/lib/recipes copying recipes/graphpred_hiv_pna.yaml -> build/lib/recipes copying recipes/graphpred_pcba_gin.yaml -> build/lib/recipes copying recipes/linkpred_citation2_sage.yaml -> build/lib/recipes copying recipes/linkpred_collab_sage.yaml -> build/lib/recipes copying recipes/linkpred_cora_sage.yaml -> build/lib/recipes copying recipes/nodepred-ns_arxiv_gcn.yaml -> build/lib/recipes copying recipes/nodepred-ns_product_sage.yaml -> build/lib/recipes copying recipes/nodepred_citeseer_gat.yaml -> build/lib/recipes copying recipes/nodepred_citeseer_gcn.yaml -> build/lib/recipes copying recipes/nodepred_citeseer_sage.yaml -> build/lib/recipes copying recipes/nodepred_cora_gat.yaml -> build/lib/recipes copying recipes/nodepred_cora_gcn.yaml -> build/lib/recipes copying recipes/nodepred_cora_sage.yaml -> build/lib/recipes copying recipes/nodepred_pubmed_gat.yaml -> build/lib/recipes copying recipes/nodepred_pubmed_gcn.yaml -> build/lib/recipes copying recipes/nodepred_pubmed_sage.yaml -> build/lib/recipes copying dglgo/pipeline/nodepred_sample/nodepred-ns.jinja-py -> build/lib/dglgo/pipeline/nodepred_sample copying dglgo/pipeline/graphpred/graphpred.jinja-py -> build/lib/dglgo/pipeline/graphpred copying dglgo/pipeline/linkpred/linkpred.jinja-py -> build/lib/dglgo/pipeline/linkpred copying dglgo/pipeline/nodepred/nodepred.jinja-py -> build/lib/dglgo/pipeline/nodepred copying dglgo/apply_pipeline/nodepred_sample/nodepred-ns.jinja-py -> build/lib/dglgo/apply_pipeline/nodepred_sample copying dglgo/apply_pipeline/graphpred/graphpred.jinja-py -> build/lib/dglgo/apply_pipeline/graphpred copying dglgo/apply_pipeline/nodepred/nodepred.jinja-py -> build/lib/dglgo/apply_pipeline/nodepred creating build/bdist.linux-x86_64 creating build/bdist.linux-x86_64/egg creating build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/graphpred_hiv_pna.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/nodepred_cora_gat.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/nodepred_cora_gcn.yaml -> 
build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/nodepred_citeseer_sage.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/nodepred_pubmed_sage.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/nodepred_pubmed_gcn.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/linkpred_citation2_sage.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/linkpred_cora_sage.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/nodepred-ns_product_sage.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/nodepred_pubmed_gat.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/graphpred_pcba_gin.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/graphpred_hiv_gin.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/nodepred_citeseer_gcn.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/nodepred_cora_sage.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/nodepred-ns_arxiv_gcn.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/__init__.py -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/linkpred_collab_sage.yaml -> build/bdist.linux-x86_64/egg/recipes copying build/lib/recipes/nodepred_citeseer_gat.yaml -> build/bdist.linux-x86_64/egg/recipes creating build/bdist.linux-x86_64/egg/dglgo creating build/bdist.linux-x86_64/egg/dglgo/pipeline creating build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred_sample copying build/lib/dglgo/pipeline/nodepred_sample/nodepred-ns.jinja-py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred_sample copying build/lib/dglgo/pipeline/nodepred_sample/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred_sample copying build/lib/dglgo/pipeline/nodepred_sample/gen.py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred_sample creating build/bdist.linux-x86_64/egg/dglgo/pipeline/graphpred copying build/lib/dglgo/pipeline/graphpred/graphpred.jinja-py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/graphpred copying build/lib/dglgo/pipeline/graphpred/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/graphpred copying build/lib/dglgo/pipeline/graphpred/gen.py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/graphpred creating build/bdist.linux-x86_64/egg/dglgo/pipeline/linkpred copying build/lib/dglgo/pipeline/linkpred/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/linkpred copying build/lib/dglgo/pipeline/linkpred/linkpred.jinja-py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/linkpred copying build/lib/dglgo/pipeline/linkpred/gen.py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/linkpred creating build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred copying build/lib/dglgo/pipeline/nodepred/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred copying build/lib/dglgo/pipeline/nodepred/nodepred.jinja-py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred copying build/lib/dglgo/pipeline/nodepred/gen.py -> build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred copying build/lib/dglgo/pipeline/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/pipeline creating build/bdist.linux-x86_64/egg/dglgo/utils copying build/lib/dglgo/utils/early_stop.py -> build/bdist.linux-x86_64/egg/dglgo/utils copying build/lib/dglgo/utils/optimizer_config.py -> build/bdist.linux-x86_64/egg/dglgo/utils copying build/lib/dglgo/utils/enter_config.py -> build/bdist.linux-x86_64/egg/dglgo/utils copying 
build/lib/dglgo/utils/yaml_dump.py -> build/bdist.linux-x86_64/egg/dglgo/utils copying build/lib/dglgo/utils/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/utils copying build/lib/dglgo/utils/factory.py -> build/bdist.linux-x86_64/egg/dglgo/utils copying build/lib/dglgo/utils/base_model.py -> build/bdist.linux-x86_64/egg/dglgo/utils creating build/bdist.linux-x86_64/egg/dglgo/apply_pipeline creating build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred_sample copying build/lib/dglgo/apply_pipeline/nodepred_sample/nodepred-ns.jinja-py -> build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred_sample copying build/lib/dglgo/apply_pipeline/nodepred_sample/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred_sample copying build/lib/dglgo/apply_pipeline/nodepred_sample/gen.py -> build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred_sample creating build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/graphpred copying build/lib/dglgo/apply_pipeline/graphpred/graphpred.jinja-py -> build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/graphpred copying build/lib/dglgo/apply_pipeline/graphpred/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/graphpred copying build/lib/dglgo/apply_pipeline/graphpred/gen.py -> build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/graphpred creating build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred copying build/lib/dglgo/apply_pipeline/nodepred/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred copying build/lib/dglgo/apply_pipeline/nodepred/nodepred.jinja-py -> build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred copying build/lib/dglgo/apply_pipeline/nodepred/gen.py -> build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred copying build/lib/dglgo/apply_pipeline/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/apply_pipeline creating build/bdist.linux-x86_64/egg/dglgo/model creating build/bdist.linux-x86_64/egg/dglgo/model/node_encoder copying build/lib/dglgo/model/node_encoder/sage.py -> build/bdist.linux-x86_64/egg/dglgo/model/node_encoder copying build/lib/dglgo/model/node_encoder/sgc.py -> build/bdist.linux-x86_64/egg/dglgo/model/node_encoder copying build/lib/dglgo/model/node_encoder/gcn.py -> build/bdist.linux-x86_64/egg/dglgo/model/node_encoder copying build/lib/dglgo/model/node_encoder/gin.py -> build/bdist.linux-x86_64/egg/dglgo/model/node_encoder copying build/lib/dglgo/model/node_encoder/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/model/node_encoder copying build/lib/dglgo/model/node_encoder/gat.py -> build/bdist.linux-x86_64/egg/dglgo/model/node_encoder creating build/bdist.linux-x86_64/egg/dglgo/model/graph_encoder copying build/lib/dglgo/model/graph_encoder/gin_ogbg.py -> build/bdist.linux-x86_64/egg/dglgo/model/graph_encoder copying build/lib/dglgo/model/graph_encoder/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/model/graph_encoder copying build/lib/dglgo/model/graph_encoder/pna.py -> build/bdist.linux-x86_64/egg/dglgo/model/graph_encoder creating build/bdist.linux-x86_64/egg/dglgo/model/edge_encoder copying build/lib/dglgo/model/edge_encoder/bilinear.py -> build/bdist.linux-x86_64/egg/dglgo/model/edge_encoder copying build/lib/dglgo/model/edge_encoder/dot.py -> build/bdist.linux-x86_64/egg/dglgo/model/edge_encoder copying build/lib/dglgo/model/edge_encoder/ele.py -> build/bdist.linux-x86_64/egg/dglgo/model/edge_encoder copying build/lib/dglgo/model/edge_encoder/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/model/edge_encoder copying 
build/lib/dglgo/model/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/model creating build/bdist.linux-x86_64/egg/dglgo/cli copying build/lib/dglgo/cli/config_apply_cli.py -> build/bdist.linux-x86_64/egg/dglgo/cli copying build/lib/dglgo/cli/config_cli.py -> build/bdist.linux-x86_64/egg/dglgo/cli copying build/lib/dglgo/cli/train_cli.py -> build/bdist.linux-x86_64/egg/dglgo/cli copying build/lib/dglgo/cli/cli.py -> build/bdist.linux-x86_64/egg/dglgo/cli copying build/lib/dglgo/cli/export_cli.py -> build/bdist.linux-x86_64/egg/dglgo/cli copying build/lib/dglgo/cli/recipe_cli.py -> build/bdist.linux-x86_64/egg/dglgo/cli copying build/lib/dglgo/cli/__init__.py -> build/bdist.linux-x86_64/egg/dglgo/cli copying build/lib/dglgo/cli/apply_cli.py -> build/bdist.linux-x86_64/egg/dglgo/cli copying build/lib/dglgo/__init__.py -> build/bdist.linux-x86_64/egg/dglgo byte-compiling build/bdist.linux-x86_64/egg/recipes/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred_sample/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred_sample/gen.py to gen.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/pipeline/graphpred/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/pipeline/graphpred/gen.py to gen.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/pipeline/linkpred/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/pipeline/linkpred/gen.py to gen.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/pipeline/nodepred/gen.py to gen.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/pipeline/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/utils/early_stop.py to early_stop.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/utils/optimizer_config.py to optimizer_config.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/utils/enter_config.py to enter_config.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/utils/yaml_dump.py to yaml_dump.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/utils/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/utils/factory.py to factory.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/utils/base_model.py to base_model.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred_sample/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred_sample/gen.py to gen.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/graphpred/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/graphpred/gen.py to gen.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/nodepred/gen.py to gen.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/apply_pipeline/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/node_encoder/sage.py to sage.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/node_encoder/sgc.py to sgc.cpython-37.pyc 
byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/node_encoder/gcn.py to gcn.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/node_encoder/gin.py to gin.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/node_encoder/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/node_encoder/gat.py to gat.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/graph_encoder/gin_ogbg.py to gin_ogbg.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/graph_encoder/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/graph_encoder/pna.py to pna.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/edge_encoder/bilinear.py to bilinear.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/edge_encoder/dot.py to dot.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/edge_encoder/ele.py to ele.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/edge_encoder/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/model/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/cli/config_apply_cli.py to config_apply_cli.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/cli/config_cli.py to config_cli.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/cli/train_cli.py to train_cli.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/cli/cli.py to cli.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/cli/export_cli.py to export_cli.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/cli/recipe_cli.py to recipe_cli.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/cli/__init__.py to __init__.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/cli/apply_cli.py to apply_cli.cpython-37.pyc byte-compiling build/bdist.linux-x86_64/egg/dglgo/__init__.py to __init__.cpython-37.pyc creating build/bdist.linux-x86_64/egg/EGG-INFO copying dglgo.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO copying dglgo.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dglgo.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dglgo.egg-info/entry_points.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dglgo.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO copying dglgo.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/command/install.py:37: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools. setuptools.SetuptoolsDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/command/easy_install.py:147: EasyInstallDeprecationWarning: easy_install command is deprecated. Use build and pip and other standards-based tools. EasyInstallDeprecationWarning, zip_safe flag not set; analyzing archive contents... 
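The "writing entry points to dglgo.egg-info/entry_points.txt" step above, and the "Installing dgl script" step further below, come from a setuptools console-script entry point. A hypothetical sketch of the relevant setup.py fragment; the target module is a guess based on the cli.py files being copied, not dglgo's actual configuration:

    from setuptools import setup, find_packages

    setup(
        name='dglgo',
        version='0.0.2',
        packages=find_packages(),
        entry_points={
            'console_scripts': [
                'dgl = dglgo.cli.cli:main',  # assumed target, for illustration
            ],
        },
    )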
dglgo.apply_pipeline.graphpred.__pycache__.gen.cpython-37: module references __file__ dglgo.apply_pipeline.nodepred.__pycache__.gen.cpython-37: module references __file__ dglgo.apply_pipeline.nodepred_sample.__pycache__.gen.cpython-37: module references __file__ dglgo.cli.__pycache__.recipe_cli.cpython-37: module references __file__ dglgo.pipeline.graphpred.__pycache__.gen.cpython-37: module references __file__ dglgo.pipeline.linkpred.__pycache__.gen.cpython-37: module references __file__ dglgo.pipeline.nodepred.__pycache__.gen.cpython-37: module references __file__ dglgo.pipeline.nodepred_sample.__pycache__.gen.cpython-37: module references __file__ dglgo.utils.__pycache__.factory.cpython-37: module references __file__ creating dist creating 'dist/dglgo-0.0.2-py3.7.egg' and adding 'build/bdist.linux-x86_64/egg' to it removing 'build/bdist.linux-x86_64/egg' (and everything under it) Processing dglgo-0.0.2-py3.7.egg creating /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg Extracting dglgo-0.0.2-py3.7.egg to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g1-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g2-int32] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g2-int64] Adding dglgo 0.0.2 to easy-install.pth file Installing dgl script to /opt/conda/envs/pytorch-ci/bin Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg Processing dependencies for dglgo==0.0.2 Searching for rdkit-pypi Reading https://pypi.org/simple/rdkit-pypi/ PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-int32] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g4-int32] [05:34:52] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:34:52] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
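The "module references __file__" lines explain why setuptools declines to zip the egg: those modules locate package data relative to their own file on disk, which breaks when the package is imported from a zip archive. An illustrative sketch, assuming the flagged gen.py modules load templates this way; the importlib.resources alternative shown is a general zip-safe pattern, not dglgo's actual code:

    import os

    # Filesystem-relative access; only works with an unzipped install.
    TEMPLATE = os.path.join(os.path.dirname(__file__), 'nodepred.jinja-py')

    # Zip-safe alternative: read package data through importlib.resources.
    from importlib import resources
    text = resources.read_text('dglgo.pipeline.nodepred', 'nodepred.jinja-py')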
PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g4-int64] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g5-int32] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g5-int64] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-int32] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g0-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g0-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g1-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g1-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g2-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g2-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g3-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g3-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g4-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g4-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g5-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g5-int64] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g6-int32] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g6-int64] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-int32] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-int64] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g1-int32] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g1-int64] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g2-int32] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g2-int64] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g3-int32] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g3-int64] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g4-int32] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g4-int64] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-int32] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-int64] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-int32] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-int64] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[int32] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[int64] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[int32] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[int64] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_edge_removal[int32] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_edge_removal[int64] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[int32] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[int64] PASSED [ 19%] tests/compute/test_removal.py::test_node_and_edge_removal[int32] Downloading https://files.pythonhosted.org/packages/8f/82/f26ed8d8c943235a1700b3e09f98d2c18f7e6b9eb0839460d8b3b5c26671/rdkit_pypi-2022.3.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=de4a4492a812724037fea086b4e6677124ca91a10fa196d83b9c8acac7c64d5d Best match: rdkit-pypi 2022.3.5 Processing rdkit_pypi-2022.3.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl Installing rdkit_pypi-2022.3.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Client [2265] waits on 172.17.0.3:39213 Machine 
(0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") PASSED [ 19%] tests/compute/test_removal.py::test_node_and_edge_removal[int64] Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Server (2) shutdown. Server (1) shutdown. Server is exiting... Server is exiting... PASSED [ 19%] tests/compute/test_removal.py::test_node_frame[int32] PASSED [ 19%] tests/compute/test_removal.py::test_node_frame[int64] PASSED [ 19%] tests/compute/test_removal.py::test_edge_frame[int32] PASSED [ 19%] tests/compute/test_removal.py::test_edge_frame[int64] PASSED [ 19%] tests/compute/test_removal.py::test_issue1287[int32] PASSED [ 19%] tests/compute/test_removal.py::test_issue1287[int64] PASSED [ 19%] tests/compute/test_sampler.py::test_create_full PASSED [ 19%] tests/compute/test_sampler.py::test_1neighbor_sampler_all Adding rdkit-pypi 2022.3.5 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/rdkit_pypi-2022.3.5-py3.7-linux-x86_64.egg Searching for PyYAML>=5.1 Reading https://pypi.org/simple/PyYAML/ PASSED [ 19%] tests/compute/test_sampler.py::test_1neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_prefetch_neighbor_sampler Downloading https://files.pythonhosted.org/packages/eb/5f/6e6fe6904e1a9c67bc2ca5629a69e7a5a0b17f079da838bab98a1e548b25/PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl#sha256=231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9 Best match: PyYAML 6.0 Processing PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl Installing PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Adding PyYAML 6.0 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/PyYAML-6.0-py3.7-linux-x86_64.egg Searching for ruamel.yaml>=0.17.20 Reading https://pypi.org/simple/ruamel.yaml/ PASSED [ 20%] tests/compute/test_sampler.py::test_10neighbor_sampler_all PASSED [ 20%] tests/compute/test_sampler.py::test_10neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_layer_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_setseed PASSED [ 20%] tests/compute/test_sampler.py::test_negative_sampler PASSED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-idtype0] PASSED [ 11%] tests/compute/test_kernel.py::test_mean_zero_degree[g0-idtype1] PASSED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[idtype0] PASSED [ 11%] tests/compute/test_merge.py::test_heterograph_merge[idtype1] PASSED [ 11%] tests/compute/test_nccl.py::test_nccl_id SKIPPED (NCCL only runs on ...) [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_remainder SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_remainder SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_push_single_range SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_sparse_pull_single_range SKIPPED [ 11%] tests/compute/test_nccl.py::test_nccl_support SKIPPED (NCCL only run...)
[ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype0] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_u[idtype1] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype0] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_unary_copy_e[idtype1] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype0] SKIPPED [ 11%] tests/compute/test_new_update_all_hetero.py::test_binary_op[idtype1] SKIPPED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[idtype0] SKIPPED [ 11%] tests/compute/test_partition.py::test_get_node_partition_from_book[idtype1] SKIPPED [ 12%] tests/compute/test_pickle.py::test_pickling_index PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph_index PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g0-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g0-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g1-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g1-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g2-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g3-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g4-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g5-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g5-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g6-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g6-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g7-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g8-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g9-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-idtype0] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g10-idtype1] PASSED [ 12%] tests/compute/test_pickle.py::test_pickling_graph[g11-idtype0] PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g11-idtype1] PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g12-idtype0] PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_graph[g12-idtype1] PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_batched_heterograph PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_subgraph PASSED [ 13%] tests/compute/test_pickle.py::test_pickling_is_pinned[idtype0] SKIPPED [ 13%] tests/compute/test_pickle.py::test_pickling_is_pinned[idtype1] SKIPPED [ 13%] tests/compute/test_pin_memory.py::test_pin_unpin SKIPPED (Need gpu f...) 
[ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[idtype0] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_bfs[idtype1] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[idtype0] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_edges_dfs[idtype1] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype0] PASSED [ 13%] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype1] PASSED [ 13%] tests/compute/test_random.py::test_random_choice PASSED [ 13%] tests/compute/test_readout.py::test_sum_case1[idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_sum_case1[idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g0-idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g1-idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g2-idtype1] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-idtype0] PASSED [ 13%] tests/compute/test_readout.py::test_reduce_readout[sum-g3-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g4-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g5-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[sum-g6-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g0-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g1-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g2-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g3-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g4-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g5-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[max-g6-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g0-idtype1] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-idtype0] PASSED [ 14%] tests/compute/test_readout.py::test_reduce_readout[mean-g1-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g2-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g2-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g3-idtype0] PASSED [ 15%] 
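The [idtype0]/[idtype1] suffixes in this second pass of the suite, like the [int32]/[int64] suffixes earlier, come from pytest parametrization over the framework's integer id types. A sketch of the pattern, shown here with the PyTorch backend; the test body is illustrative, not DGL's actual test:

    import pytest
    import torch
    import dgl

    @pytest.mark.parametrize('idtype', [torch.int32, torch.int64])
    def test_sum_case1(idtype):
        g = dgl.graph(([0, 1], [1, 2]), idtype=idtype)
        g.ndata['h'] = torch.ones(3, 2)
        assert dgl.sum_nodes(g, 'h').shape == (1, 2)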
tests/compute/test_readout.py::test_reduce_readout[mean-g3-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g4-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g4-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g5-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_reduce_readout[mean-g6-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g0-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g1-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g2-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g3-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g4-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g5-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype0] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[sum-g6-idtype1] PASSED [ 15%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g0-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g1-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g2-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g3-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g4-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g5-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[max-g6-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g0-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g1-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype0] PASSED [ 16%] 
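test_weighted_reduce_readout covers the weighted variant of the readout above, where each node's feature is scaled by a per-node weight before aggregation. A minimal sketch via the weight argument of dgl.readout_nodes; the feature and weight names are illustrative:

    import dgl
    import torch

    g = dgl.graph(([0, 1, 2], [1, 2, 0]))
    g.ndata['h'] = torch.randn(3, 4)
    g.ndata['w'] = torch.rand(3, 1)

    # Sum of w_i * h_i over the nodes of each graph; shape (1, 4).
    out = dgl.readout_nodes(g, 'h', weight='w', op='sum')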
tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g2-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g3-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype0] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g4-idtype1] PASSED [ 16%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g5-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_weighted_reduce_readout[mean-g6-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g0-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g0-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g1-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g1-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g2-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g2-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g3-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g3-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g4-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g4-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g5-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g5-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g6-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[True-g6-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g0-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g0-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g1-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g1-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g2-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g2-idtype1] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-idtype0] PASSED [ 17%] tests/compute/test_readout.py::test_topk[False-g3-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g4-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g4-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g5-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g5-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_topk[False-g6-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g0-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g0-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g1-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g1-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g2-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g2-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g3-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g3-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g4-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g4-idtype1] PASSED [ 18%] 
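test_topk and test_softmax above, and test_broadcast just below, exercise the remaining graph-wise node operations; the True/False in the test_topk ids plausibly maps to its descending flag (an assumption). A sketch with illustrative shapes:

    import dgl
    import torch

    g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
    g.ndata['h'] = torch.randn(4, 5)

    p = dgl.softmax_nodes(g, 'h')       # per-graph softmax over nodes, (4, 5)
    s = dgl.sum_nodes(g, 'h')           # graph-level vector, (1, 5)
    b = dgl.broadcast_nodes(g, s)       # copied back to every node, (4, 5)
    topk = dgl.topk_nodes(g, 'h', k=2)  # top-2 per feature dimension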
tests/compute/test_readout.py::test_softmax[g5-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g5-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g6-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_softmax[g6-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g0-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g1-idtype0] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g1-idtype1] PASSED [ 18%] tests/compute/test_readout.py::test_broadcast[g2-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g2-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g3-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g3-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g4-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g4-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g5-idtype1] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-idtype0] PASSED [ 19%] tests/compute/test_readout.py::test_broadcast[g6-idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_node_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_node_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_edge_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_multigraph_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_and_edge_removal[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_node_and_edge_removal[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_node_frame[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_node_frame[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_edge_frame[idtype0] PASSED [ 19%] tests/compute/test_removal.py::test_edge_frame[idtype1] PASSED [ 19%] tests/compute/test_removal.py::test_issue1287[idtype0] PASSED [ 20%] tests/compute/test_removal.py::test_issue1287[idtype1] PASSED [ 20%] tests/compute/test_sampler.py::test_create_full PASSED [ 20%] tests/compute/test_sampler.py::test_1neighbor_sampler_all PASSED [ 20%] tests/compute/test_sampler.py::test_1neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_prefetch_neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_10neighbor_sampler_all PASSED [ 20%] tests/compute/test_sampler.py::test_10neighbor_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_layer_sampler PASSED [ 20%] tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler SKIPPED [ 20%] tests/compute/test_sampler.py::test_setseed PASSED [ 20%] tests/compute/test_sampler.py::test_negative_sampler SKIPPED (TF doe...) 
[ 20%] tests/compute/test_sampling.py::test_non_uniform_random_walk[True] SKIPPED [ 20%] tests/compute/test_sampling.py::test_non_uniform_random_walk[False] PASSED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[True] SKIPPED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[False] PASSED [ 20%] tests/compute/test_sampling.py::test_node2vec PASSED [ 20%] tests/compute/test_sampling.py::test_pack_traces PASSED [ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[True] SKIPPED [ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[False] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_noprob Downloading https://files.pythonhosted.org/packages/9e/cb/938214ac358fbef7058343b3765c79a1b7ed0c366f7f992ce7ff38335652/ruamel.yaml-0.17.21-py3-none-any.whl#sha256=742b35d3d665023981bd6d16b3d24248ce5df75fdb4e2924e93a05c1f8b61ca7 Best match: ruamel.yaml 0.17.21 Processing ruamel.yaml-0.17.21-py3-none-any.whl Installing ruamel.yaml-0.17.21-py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Converting to homogeneous graph takes 0.010s, peak mem: 1.841 GB Convert a graph into a bidirected graph: 0.017 seconds, peak memory: 1.841 GB Construct multi-constraint weights: 0.001 seconds, peak memory: 1.841 GB [05:35:13] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 30030 nodes and 601200 edges into 3 parts and get 151889 edge cuts Metis partitioning: 0.177 seconds, peak memory: 1.841 GB Assigning nodes to METIS partitions takes 0.195s, peak mem: 1.841 GB Reshuffle nodes and edges: 0.016 seconds Split the graph: 0.045 seconds Construct subgraphs: 0.043 seconds Splitting the graph into partitions takes 0.104s, peak mem: 1.841 GB part 0 has 9942 nodes of type n1 and 3143 are inside the partition part 0 has 9963 nodes of type n2 and 3139 are inside the partition part 0 has 9973 nodes of type n3 and 3142 are inside the partition part 0 has 47618 edges of type r1 and 31517 are inside the partition part 0 has 47779 edges of type r2 and 31406 are inside the partition part 0 has 47492 edges of type r3 and 31197 are inside the partition part 1 has 9967 nodes of type n1 and 3424 are inside the partition part 1 has 9984 nodes of type n2 and 3436 are inside the partition part 1 has 9989 nodes of type n3 and 3438 are inside the partition part 1 has 51567 edges of type r1 and 34410 are inside the partition part 1 has 51514 edges of type r2 and 34297 are inside the partition part 1 has 51767 edges of type r3 and 34580 are inside the partition part 2 has 9964 nodes of type n1 and 3433 are inside the partition part 2 has 9985 nodes of type n2 and 3435 are inside the partition part 2 has 9988 nodes of type n3 and 3440 are inside the partition part 2 has 51379 edges of type r1 and 34173 are inside the partition part 2 has 51847 edges of type r2 and 34497 are inside the partition part 2 has 51526 edges of type r3 and 34523 are inside the partition Save partitions: 0.037 seconds, peak memory: 1.841 GB There are 300600 edges in the graph and 0 edge cuts for 3 partitions. server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. 
Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:35:14] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:14] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13169]... server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:35:15] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:15] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13172]... Adding ruamel.yaml 0.17.21 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/ruamel.yaml-0.17.21-py3.7.egg Searching for pydantic>=1.9.0 Reading https://pypi.org/simple/pydantic/ server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:35:16] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:16] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:13175]... [05:35:17] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:17] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_prob Client [2305] waits on 172.17.0.3:34813 Machine (0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") Client[0] in group[0] is exiting... Server (1) shutdown. Server is exiting... Server (0) shutdown. Server is exiting... Server (2) shutdown. Server is exiting... PASSED [ 49%] tests/distributed/test_mp_dataloader.py::test_dataloader[node-4-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files.
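The NumNodes/NumEdges block above is the standard banner printed when the Cora citation graph is loaded from cache. For reference, the loader that produces it:

    from dgl.data import CoraGraphDataset

    dataset = CoraGraphDataset()   # prints the NumNodes/... banner
    g = dataset[0]                 # 2708 nodes, 10556 edges
    feat = g.ndata['feat']         # 1433-dim node features
    label = g.ndata['label']       # 7 classes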
Converting to homogeneous graph takes 0.001s, peak mem: 1.841 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.841 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.841 GB [05:35:21] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.003 seconds, peak memory: 1.841 GB Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.841 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.004 seconds Splitting the graph into partitions takes 0.008s, peak mem: 1.841 GB part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.020 seconds, peak memory: 1.841 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. Downloading https://files.pythonhosted.org/packages/d4/ec/230ab377c457cd68cfda78759e2a57f8c08a9e9adb4cd53c4d2fc9100b15/pydantic-1.10.2-py3-none-any.whl#sha256=1b6ee725bd6e83ec78b1aa32c5b1fa67a3a65badddde3976bca5fe4568f27709 Best match: pydantic 1.10.2 Processing pydantic-1.10.2-py3-none-any.whl Installing pydantic-1.10.2-py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Adding pydantic 1.10.2 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pydantic-1.10.2-py3.7.egg Searching for numpydoc>=1.1.0 Reading https://pypi.org/simple/numpydoc/ Downloading https://files.pythonhosted.org/packages/e7/1a/9e3c2a34aae5bd1ab8988b238aafeb4c8d3ab312b8aa5a8c37be6c6d869d/numpydoc-1.4.0-py3-none-any.whl#sha256=fd26258868ebcc75c816fe68e1d41e3b55bd410941acfb969dee3eef6e5cf260 Best match: numpydoc 1.4.0 Processing numpydoc-1.4.0-py3-none-any.whl Installing numpydoc-1.4.0-py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Adding numpydoc 1.4.0 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/numpydoc-1.4.0-py3.7.egg Searching for autopep8>=1.6.0 Reading https://pypi.org/simple/autopep8/ Downloading https://files.pythonhosted.org/packages/5d/9b/1ed75f8c9086fafe0e9bbb379a70c43b1aa9dff6154ddcfb818f78cb0736/autopep8-1.7.0-py2.py3-none-any.whl#sha256=6f09e90a2be784317e84dc1add17ebfc7abe3924239957a37e5040e27d812087 Best match: autopep8 1.7.0 Processing autopep8-1.7.0-py2.py3-none-any.whl Installing autopep8-1.7.0-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:35:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21763]... 
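The METIS output above ("Partition a graph with 2708 nodes and 10556 edges into 3 parts") is the partitioning step that feeds the distributed tests. A sketch of the usual entry point, assuming dgl.distributed.partition_graph; the output path is a placeholder, and only the graph name and part count are taken from the log:

    import dgl
    from dgl.data import CoraGraphDataset

    g = CoraGraphDataset()[0]
    # Partition into 3 parts with METIS and save the parts to disk.
    dgl.distributed.partition_graph(
        g, graph_name='test_sampling', num_parts=3,
        out_path='/tmp/test_sampling_parts', part_method='metis')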
Adding autopep8 1.7.0 to easy-install.pth file Installing autopep8 script to /opt/conda/envs/pytorch-ci/bin Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/autopep8-1.7.0-py3.7.egg Searching for isort>=5.10.1 Reading https://pypi.org/simple/isort/ server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:35:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21766]... PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_outedge Downloading https://files.pythonhosted.org/packages/b8/5b/f18e227df38b94b4ee30d2502fd531bebac23946a2497e5595067a561274/isort-5.10.1-py3-none-any.whl#sha256=6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7 Best match: isort 5.10.1 Processing isort-5.10.1-py3-none-any.whl Installing isort-5.10.1-py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") Adding isort 5.10.1 to easy-install.pth file Installing isort script to /opt/conda/envs/pytorch-ci/bin Installing isort-identify-imports script to /opt/conda/envs/pytorch-ci/bin Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/isort-5.10.1-py3.7.egg Searching for typer>=0.4.0 Reading https://pypi.org/simple/typer/ load test_sampling start graph service on server 2 for part 2 [05:35:24] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:24] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21769]... Downloading https://files.pythonhosted.org/packages/e8/9b/7470461c68588ed09c2e53cbb16b802815232796d95f7b4744cc8842f19d/typer-0.6.1-py3-none-any.whl#sha256=54b19e5df18654070a82f8c2aa1da456a4ac16a2a83e6dcd9f170e291c56338e Best match: typer 0.6.1 Processing typer-0.6.1-py3-none-any.whl Installing typer-0.6.1-py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Adding typer 0.6.1 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/typer-0.6.1-py3.7.egg Searching for ruamel.yaml.clib>=0.2.6 Reading https://pypi.org/simple/ruamel.yaml.clib/ [05:35:25] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:25] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:35:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:35:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
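test_sample_neighbors_outedge above (and the topk variants reported nearby) exercise sampling along outgoing edges and weight-based top-k edge selection. A rough sketch of both calls on a made-up random graph:

    import dgl
    import torch

    g = dgl.rand_graph(100, 500)                 # random homogeneous graph
    g.edata['w'] = torch.rand(g.num_edges())     # per-edge weights

    # Sample 3 outgoing neighbors for each of the first 10 nodes.
    sg = dgl.sampling.sample_neighbors(g, torch.arange(10), 3, edge_dir='out')

    # Keep only the top-2 in-edges of every node, ranked by weight 'w'.
    tg = dgl.sampling.select_topk(g, 2, 'w')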
Downloading https://files.pythonhosted.org/packages/98/8a/ba37489b423916162b086b01c7c18001cf297350694180468e1698085c58/ruamel.yaml.clib-0.2.6-cp37-cp37m-manylinux1_x86_64.whl#sha256=78988ed190206672da0f5d50c61afef8f67daa718d614377dcd5e3ed85ab4a99 Best match: ruamel.yaml.clib 0.2.6 Processing ruamel.yaml.clib-0.2.6-cp37-cp37m-manylinux1_x86_64.whl Installing ruamel.yaml.clib-0.2.6-cp37-cp37m-manylinux1_x86_64.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages [05:35:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:35:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Adding ruamel.yaml.clib 0.2.6 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/ruamel.yaml.clib-0.2.6-py3.7-linux-x86_64.egg Searching for Jinja2>=2.10 Reading https://pypi.org/simple/Jinja2/ Downloading https://files.pythonhosted.org/packages/bc/c3/f068337a370801f372f2f8f6bad74a5c140f6fda3d9de154052708dd3c65/Jinja2-3.1.2-py3-none-any.whl#sha256=6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 Best match: Jinja2 3.1.2 Processing Jinja2-3.1.2-py3-none-any.whl Installing Jinja2-3.1.2-py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Adding Jinja2 3.1.2 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/Jinja2-3.1.2-py3.7.egg Searching for sphinx>=3.0 Reading https://pypi.org/simple/sphinx/ Downloading https://files.pythonhosted.org/packages/00/5c/27480aeb398ab549cbb0840d154464d4854ac49f6fff0b452e106bc54f8e/sphinx-5.2.1-py3-none-any.whl#sha256=3dcf00fcf82cf91118db9b7177edea4fc01998976f893928d0ab0c58c54be2ca Best match: sphinx 5.2.1 Processing sphinx-5.2.1-py3-none-any.whl Installing sphinx-5.2.1-py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Adding sphinx 5.2.1 to easy-install.pth file Installing sphinx-apidoc script to /opt/conda/envs/pytorch-ci/bin Installing sphinx-autogen script to /opt/conda/envs/pytorch-ci/bin Installing sphinx-build script to /opt/conda/envs/pytorch-ci/bin Installing sphinx-quickstart script to /opt/conda/envs/pytorch-ci/bin Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/sphinx-5.2.1-py3.7.egg Searching for toml Reading https://pypi.org/simple/toml/ Downloading https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl#sha256=806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b Client [2335] waits on 172.17.0.3:55517 Client [2340] waits on 172.17.0.3:40105 Client [2341] waits on 172.17.0.3:50781 Client [2339] waits on 172.17.0.3:47191 Client [2342] waits on 172.17.0.3:46505 Machine (0) group (0) client (4) connect to server successfully! Machine (0) group (0) client (0) connect to server successfully! Machine (0) group (0) client (2) connect to server successfully! Machine (0) group (0) client (3) connect to server successfully! Machine (0) group (0) client (1) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") Best match: toml 0.10.2 Processing toml-0.10.2-py2.py3-none-any.whl Installing toml-0.10.2-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Adding toml 0.10.2 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/toml-0.10.2-py3.7.egg Searching for pycodestyle>=2.9.1 Reading https://pypi.org/simple/pycodestyle/ Client[4] in group[0] is exiting... Client[2] in group[0] is exiting... Downloading https://files.pythonhosted.org/packages/67/e4/fc77f1039c34b3612c4867b69cbb2b8a4e569720b1f19b0637002ee03aff/pycodestyle-2.9.1-py2.py3-none-any.whl#sha256=d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b Best match: pycodestyle 2.9.1 Processing pycodestyle-2.9.1-py2.py3-none-any.whl Installing pycodestyle-2.9.1-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Client[0] in group[0] is exiting... Adding pycodestyle 2.9.1 to easy-install.pth file Installing pycodestyle script to /opt/conda/envs/pytorch-ci/bin Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pycodestyle-2.9.1-py3.7.egg Searching for MarkupSafe>=2.0 Reading https://pypi.org/simple/MarkupSafe/ Client[3] in group[0] is exiting... Client[1] in group[0] is exiting... Server (2) shutdown. Server (1) shutdown. Server is exiting... Server is exiting... Server (0) shutdown. Server is exiting... Downloading https://files.pythonhosted.org/packages/9f/83/b221ce5a0224f409b9f02b0dc6cb0b921c46033f4870d64fa3e8a96af701/MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl#sha256=e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37 PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk Best match: MarkupSafe 2.1.1 Processing MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl Installing MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Adding MarkupSafe 2.1.1 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/MarkupSafe-2.1.1-py3.7-linux-x86_64.egg Searching for imagesize>=1.3 Reading https://pypi.org/simple/imagesize/ Downloading https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl#sha256=0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b Best match: imagesize 1.4.1 Processing imagesize-1.4.1-py2.py3-none-any.whl Installing imagesize-1.4.1-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Adding imagesize 1.4.1 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/imagesize-1.4.1-py3.7.egg Searching for alabaster<0.8,>=0.7 Reading https://pypi.org/simple/alabaster/ Downloading https://files.pythonhosted.org/packages/10/ad/00b090d23a222943eb0eda509720a404f531a439e803f6538f35136cae9e/alabaster-0.7.12-py2.py3-none-any.whl#sha256=446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk_outedge Best match: alabaster 0.7.12 Processing alabaster-0.7.12-py2.py3-none-any.whl Installing alabaster-0.7.12-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Adding alabaster 0.7.12 to easy-install.pth file PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_with_0deg Installed
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/alabaster-0.7.12-py3.7.egg Searching for babel>=2.9 Reading https://pypi.org/simple/babel/ Downloading https://files.pythonhosted.org/packages/2e/57/a4177e24f8ed700c037e1eca7620097fdfbb1c9b358601e40169adf6d364/Babel-2.10.3-py3-none-any.whl#sha256=ff56f4892c1c4bf0d814575ea23471c230d544203c7748e8c68f0089478d48eb PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_biased_homogeneous Best match: Babel 2.10.3 Processing Babel-2.10.3-py3-none-any.whl Installing Babel-2.10.3-py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_biased_bipartite Adding Babel 2.10.3 to easy-install.pth file Installing pybabel script to /opt/conda/envs/pytorch-ci/bin Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/Babel-2.10.3-py3.7.egg Searching for snowballstemmer>=2.0 Reading https://pypi.org/simple/snowballstemmer/ Downloading https://files.pythonhosted.org/packages/ed/dc/c02e01294f7265e63a7315fe086dd1df7dacb9f840a804da846b96d01b96/snowballstemmer-2.2.0-py2.py3-none-any.whl#sha256=c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a Best match: snowballstemmer 2.2.0 Processing snowballstemmer-2.2.0-py2.py3-none-any.whl Installing snowballstemmer-2.2.0-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-coo] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csr] Adding snowballstemmer 2.2.0 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/snowballstemmer-2.2.0-py3.7.egg Searching for docutils<0.20,>=0.14 Reading https://pypi.org/simple/docutils/ PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csc] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-coo] Downloading https://files.pythonhosted.org/packages/93/69/e391bd51bc08ed9141ecd899a0ddb61ab6465309f1eb470905c0c8868081/docutils-0.19-py3-none-any.whl#sha256=5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc Best match: docutils 0.19 Processing docutils-0.19-py3-none-any.whl Installing docutils-0.19-py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csr] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csc] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-coo] Adding docutils 0.19 to easy-install.pth file Installing rst2html4.py script to /opt/conda/envs/pytorch-ci/bin Installing rstpep2html.py script to /opt/conda/envs/pytorch-ci/bin Installing rst2man.py script to /opt/conda/envs/pytorch-ci/bin Installing rst2xetex.py script to /opt/conda/envs/pytorch-ci/bin Installing rst2s5.py script to /opt/conda/envs/pytorch-ci/bin Installing rst2pseudoxml.py script to /opt/conda/envs/pytorch-ci/bin Installing rst2html5.py script to /opt/conda/envs/pytorch-ci/bin Installing rst2odt.py script to /opt/conda/envs/pytorch-ci/bin Installing rst2latex.py script to /opt/conda/envs/pytorch-ci/bin Installing rst2html.py script to /opt/conda/envs/pytorch-ci/bin Installing rst2odt_prepstyles.py script to /opt/conda/envs/pytorch-ci/bin Installing rst2xml.py script to 
/opt/conda/envs/pytorch-ci/bin Installing docutils script to /opt/conda/envs/pytorch-ci/bin Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/docutils-0.19-py3.7.egg Searching for Pygments>=2.12 Reading https://pypi.org/simple/Pygments/ PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csr] Downloading https://files.pythonhosted.org/packages/4f/82/672cd382e5b39ab1cd422a672382f08a1fb3d08d9e0c0f3707f33a52063b/Pygments-2.13.0-py3-none-any.whl#sha256=f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42 Best match: Pygments 2.13.0 Processing Pygments-2.13.0-py3-none-any.whl PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csc] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-coo] Installing Pygments-2.13.0-py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csr] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csc] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csr] Adding Pygments 2.13.0 to easy-install.pth file Installing pygmentize script to /opt/conda/envs/pytorch-ci/bin Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/Pygments-2.13.0-py3.7.egg Searching for sphinxcontrib-qthelp Reading https://pypi.org/simple/sphinxcontrib-qthelp/ PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csc] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csr] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csc] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int32] Downloading https://files.pythonhosted.org/packages/2b/14/05f9206cf4e9cfca1afb5fd224c7cd434dcc3a433d6d9e4e0264d29c6cdb/sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl#sha256=bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6 Best match: sphinxcontrib-qthelp 1.0.3 Processing sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl Installing sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int64] Adding sphinxcontrib-qthelp 1.0.3 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/sphinxcontrib_qthelp-1.0.3-py3.7.egg Searching for sphinxcontrib-serializinghtml>=1.1.5 Reading https://pypi.org/simple/sphinxcontrib-serializinghtml/ Downloading https://files.pythonhosted.org/packages/c6/77/5464ec50dd0f1c1037e3c93249b040c8fc8078fdda97530eeb02424b6eea/sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl#sha256=352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd Best match: sphinxcontrib-serializinghtml 1.1.5 Processing sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl Installing sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int64] PASSED [ 21%] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32] Adding 
sphinxcontrib-serializinghtml 1.1.5 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/sphinxcontrib_serializinghtml-1.1.5-py3.7.egg Searching for sphinxcontrib-htmlhelp>=2.0.0 Reading https://pypi.org/simple/sphinxcontrib-htmlhelp/ Downloading https://files.pythonhosted.org/packages/63/40/c854ef09500e25f6432dcbad0f37df87fd7046d376272292d8654cc71c95/sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl#sha256=d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 Best match: sphinxcontrib-htmlhelp 2.0.0 Processing sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl Installing sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages PASSED [ 21%] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64] Adding sphinxcontrib-htmlhelp 2.0.0 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/sphinxcontrib_htmlhelp-2.0.0-py3.7.egg Searching for sphinxcontrib-jsmath Reading https://pypi.org/simple/sphinxcontrib-jsmath/ PASSED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_feature[True] PASSED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_feature[False] Downloading https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl#sha256=2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 Best match: sphinxcontrib-jsmath 1.0.1 Processing sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl Installing sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages PASSED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_without_feature[True] PASSED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_without_feature[False] PASSED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_labels[True] PASSED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_labels[False] PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_tensors PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_empty_dict PASSED [ 22%] tests/compute/test_serialize.py::test_load_old_files1 PASSED [ 22%] tests/compute/test_serialize.py::test_load_old_files2 PASSED [ 22%] tests/compute/test_serialize.py::test_deserialize_old_heterograph_file PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_heterograph Adding sphinxcontrib-jsmath 1.0.1 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/sphinxcontrib_jsmath-1.0.1-py3.7.egg Searching for sphinxcontrib-devhelp Reading https://pypi.org/simple/sphinxcontrib-devhelp/ Downloading https://files.pythonhosted.org/packages/c5/09/5de5ed43a521387f18bdf5f5af31d099605c992fd25372b2b9b825ce48ee/sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl#sha256=8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e Best match: sphinxcontrib-devhelp 1.0.2 Processing sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl Installing sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_heterograph_s3 SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_single_process[idtype0] SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_single_process[idtype1] SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_multi_process[idtype0] SKIPPED [ 22%] 
tests/compute/test_shared_mem.py::test_multi_process[idtype1] SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_copy_from_gpu SKIPPED (Need g...) [ 22%] tests/compute/test_sort.py::test_sort_with_tag[idtype0] PASSED [ 22%] tests/compute/test_sort.py::test_sort_with_tag[idtype1] Adding sphinxcontrib-devhelp 1.0.2 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/sphinxcontrib_devhelp-1.0.2-py3.7.egg Searching for sphinxcontrib-applehelp Reading https://pypi.org/simple/sphinxcontrib-applehelp/ Downloading https://files.pythonhosted.org/packages/dc/47/86022665a9433d89a66f5911b558ddff69861766807ba685de2e324bd6ed/sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl#sha256=806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a Best match: sphinxcontrib-applehelp 1.0.2 Processing sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl Installing sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl to /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages PASSED [ 22%] tests/compute/test_sort.py::test_sort_with_tag_bipartite[idtype0] PASSED [ 22%] tests/compute/test_sort.py::test_sort_with_tag_bipartite[idtype1] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g0] Adding sphinxcontrib-applehelp 1.0.2 to easy-install.pth file Installed /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/sphinxcontrib_applehelp-1.0.2-py3.7.egg Searching for scikit-learn==1.0.2 Best match: scikit-learn 1.0.2 Adding scikit-learn 1.0.2 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for ogb==1.3.3 Best match: ogb 1.3.3 Adding ogb 1.3.3 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for threadpoolctl==3.1.0 Best match: threadpoolctl 3.1.0 Adding threadpoolctl 3.1.0 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for joblib==1.1.0 Best match: joblib 1.1.0 Adding joblib 1.1.0 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for scipy==1.7.3 Best match: scipy 1.7.3 Adding scipy 1.7.3 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for numpy==1.21.6 Best match: numpy 1.21.6 Adding numpy 1.21.6 to easy-install.pth file Installing f2py script to /opt/conda/envs/pytorch-ci/bin Installing f2py3 script to /opt/conda/envs/pytorch-ci/bin Installing f2py3.7 script to /opt/conda/envs/pytorch-ci/bin Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for Pillow==9.2.0 Best match: Pillow 9.2.0 Adding Pillow 9.2.0 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for outdated==0.2.1 Best match: outdated 0.2.1 Adding outdated 0.2.1 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for urllib3==1.26.11 Best match: urllib3 1.26.11 Adding urllib3 1.26.11 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for six==1.16.0 Best match: six 1.16.0 Adding six 1.16.0 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for pandas==1.1.5 Best match: pandas 1.1.5 Adding pandas 1.1.5 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for tqdm==4.64.0 Best match: tqdm 4.64.0 Adding tqdm 4.64.0 to easy-install.pth file Installing tqdm script to /opt/conda/envs/pytorch-ci/bin Using 
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for torch==1.9.0+cpu Best match: torch 1.9.0+cpu Adding torch 1.9.0+cpu to easy-install.pth file Installing convert-caffe2-to-onnx script to /opt/conda/envs/pytorch-ci/bin Installing convert-onnx-to-caffe2 script to /opt/conda/envs/pytorch-ci/bin Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for typing-extensions==4.3.0 Best match: typing-extensions 4.3.0 Adding typing-extensions 4.3.0 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for click==8.1.3 Best match: click 8.1.3 Adding click 8.1.3 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for requests==2.28.1 Best match: requests 2.28.1 Adding requests 2.28.1 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for littleutils==0.2.2 Best match: littleutils 0.2.2 Adding littleutils 0.2.2 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for pytz==2022.2.1 Best match: pytz 2022.2.1 Adding pytz 2022.2.1 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for python-dateutil==2.8.2 Best match: python-dateutil 2.8.2 Adding python-dateutil 2.8.2 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for importlib-metadata==4.12.0 Best match: importlib-metadata 4.12.0 Adding importlib-metadata 4.12.0 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for packaging==21.3 Best match: packaging 21.3 Adding packaging 21.3 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for certifi==2022.6.15 Best match: certifi 2022.6.15 Adding certifi 2022.6.15 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for idna==3.3 Best match: idna 3.3 Adding idna 3.3 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for charset-normalizer==2.1.0 Best match: charset-normalizer 2.1.0 Adding charset-normalizer 2.1.0 to easy-install.pth file Installing normalizer script to /opt/conda/envs/pytorch-ci/bin Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for zipp==3.8.1 Best match: zipp 3.8.1 Adding zipp 3.8.1 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Searching for pyparsing==3.0.9 Best match: pyparsing 3.0.9 Adding pyparsing 3.0.9 to easy-install.pth file Using /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages Finished processing dependencies for dglgo==0.0.2 /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: post1 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: qthelp-1.0.0 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: qthelp-1.0.1 is an invalid version and 
will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: qthelp-1.0.2 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: qthelp-1.0.3 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: serializinghtml-1.0.0 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: serializinghtml-1.1.0 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: serializinghtml-1.1.1 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: serializinghtml-1.1.3 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: serializinghtml-1.1.4 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: serializinghtml-1.1.5 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: htmlhelp-1.0.0 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: htmlhelp-1.0.1 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: htmlhelp-1.0.2 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: htmlhelp-1.0.3 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: htmlhelp-2.0.0 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: jsmath-1.0.0 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: jsmath-1.0.1 is an invalid version and will not be supported in 
a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: devhelp-1.0.0 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: devhelp-1.0.1 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: devhelp-1.0.2 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: applehelp-1.0.0 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: applehelp-1.0.1 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pkg_resources/__init__.py:125: PkgResourcesDeprecationWarning: applehelp-1.0.2 is an invalid version and will not be supported in a future release PkgResourcesDeprecationWarning, ~/jenkins/workspace/dgl_PR-4648@3 PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g1] Collecting psutil Downloading psutil-5.9.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (281 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 281.3/281.3 kB 10.4 MB/s eta 0:00:00 Installing collected packages: psutil PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g0] Successfully installed psutil-5.9.2 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ============================= test session starts ============================== platform linux -- Python 3.7.0, pytest-7.1.2, pluggy-1.0.0 -- /opt/conda/envs/pytorch-ci/bin/python3 cachedir: .pytest_cache rootdir: /root/jenkins/workspace/dgl_PR-4648@3 collecting ... 
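The tests/compute/test_serialize.py cases reported earlier in this run cover DGL's on-disk graph format; a minimal sketch of the save/load round trip they exercise, with an invented file path and feature name:

    import dgl
    import torch

    g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])))
    g.ndata['feat'] = torch.randn(3, 4)

    # Round-trip through DGL's binary graph format.
    dgl.save_graphs('/tmp/g.bin', [g], labels={'label': torch.tensor([1])})
    graphs, label_dict = dgl.load_graphs('/tmp/g.bin')
    assert graphs[0].num_nodes() == g.num_nodes()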
PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g1] collected 81 items tests/go/test_model.py::test_gcn[g0] FAILED [ 1%] tests/go/test_model.py::test_gcn_block[g0] FAILED [ 2%] tests/go/test_model.py::test_gat[g0] FAILED [ 3%] tests/go/test_model.py::test_gat_block[g0] FAILED [ 4%] tests/go/test_model.py::test_gin[g0] FAILED [ 6%] tests/go/test_model.py::test_sage[g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g0] FAILED [ 7%] tests/go/test_model.py::test_sage_block[g0] FAILED [ 8%] tests/go/test_model.py::test_sgc[g0] FAILED [ 9%] tests/go/test_model.py::test_bilinear PASSED [ 11%] tests/go/test_model.py::test_ele PASSED [ 12%] tests/go/test_model.py::test_ogbg_gin[True] FAILED [ 13%] tests/go/test_model.py::test_ogbg_gin[False] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g1] FAILED [ 14%] tests/go/test_model.py::test_pna FAILED [ 16%] tests/go/test_pipeline.py::test_nodepred_data[cora] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp4-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp4-g1] Converting to homogeneous graph takes 0.009s, peak mem: 1.841 GB Convert a graph into a bidirected graph: 0.018 seconds, peak memory: 1.841 GB Construct multi-constraint weights: 0.001 seconds, peak memory: 1.841 GB [05:35:49] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 30030 nodes and 601200 edges into 3 parts and get 152443 edge cuts Metis partitioning: 0.212 seconds, peak memory: 1.841 GB Assigning nodes to METIS partitions takes 0.232s, peak mem: 1.841 GB Reshuffle nodes and edges: 0.018 seconds Split the graph: 0.058 seconds Construct subgraphs: 0.056 seconds Splitting the graph into partitions takes 0.132s, peak mem: 1.841 GB part 0 has 9953 nodes of type n1 and 3136 are inside the partition part 0 has 9963 nodes of type n2 and 3140 are inside the partition part 0 has 9959 nodes of type n3 and 3142 are inside the partition part 0 has 47353 edges of type r1 and 31289 are inside the partition part 0 has 47698 edges of type r2 and 31241 are inside the partition part 0 has 47467 edges of type r3 and 31243 are inside the partition part 1 has 9968 nodes of type n1 and 3432 are inside the partition part 1 has 9983 nodes of type n2 and 3435 are inside the partition part 1 has 9990 nodes of type n3 and 3440 are inside the partition part 1 has 51615 edges of type r1 and 34407 are inside the partition part 1 has 51895 edges of type r2 and 34446 are inside the partition part 1 has 51968 edges of type r3 and 34664 are inside the partition part 2 has 9975 nodes of type n1 and 3432 are inside the partition part 2 has 9988 nodes of type n2 and 3435 are inside the partition part 2 has 9983 nodes of type n3 and 3438 are inside the partition part 2 has 51626 edges of type r1 and 34404 are inside the partition part 2 has 51781 edges of type r2 and 34513 are inside the partition part 2 has 51640 edges of type r3 and 34393 are inside the partition Save partitions: 0.047 seconds, peak memory: 1.841 GB There are 300600 edges in the graph and 0 edge cuts for 3 partitions. server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. 
"Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:35:50] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:50] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21270]... PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp5-g0] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:35:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21273]... PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp5-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp0-g0] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:35:52] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:52] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:21276]... PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp0-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp1-g0] [05:35:53] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:53] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp1-g1] [05:35:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:35:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:35:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:35:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:35:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp2-g0] PASSED [ 17%] tests/go/test_pipeline.py::test_nodepred_data[citeseer] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp2-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g1] Client [2460] waits on 172.17.0.3:57381 Client [2457] waits on 172.17.0.3:55343 Client [2458] waits on 172.17.0.3:57261 Client [2459] waits on 172.17.0.3:46473 Client [2451] waits on 172.17.0.3:49357 Machine (0) group (0) client (0) connect to server successfully! Machine (0) group (0) client (2) connect to server successfully! Machine (0) group (0) client (4) connect to server successfully! Machine (0) group (0) client (3) connect to server successfully! Machine (0) group (0) client (1) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp4-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp4-g1] Client[1] in group[0] is exiting... Client[2] in group[0] is exiting... Client[3] in group[0] is exiting... PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp5-g0] Client[0] in group[0] is exiting... PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp5-g1] Client[4] in group[0] is exiting... PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp0-g0] Server (0) shutdown. Server is exiting... PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp0-g1] Server (2) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp1-g1] PASSED [ 18%] tests/go/test_pipeline.py::test_nodepred_data[pubmed] PASSED [ 50%] tests/distributed/test_mp_dataloader.py::test_dataloader[edge-0-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files.
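test_mp_dataloader's test_dataloader cases drive minibatch neighbor sampling over that DistGraph; assuming this DGL version's DistNodeDataLoader API, the node-classification variant follows a pattern like:

    import dgl
    import torch

    dgl.distributed.initialize('ip_config.txt')   # placeholder config path
    g = dgl.distributed.DistGraph('test_sampling')

    sampler = dgl.dataloading.NeighborSampler([5, 10])   # 2-hop fanouts
    loader = dgl.dataloading.DistNodeDataLoader(
        g, torch.arange(100), sampler, batch_size=32, shuffle=True)
    for input_nodes, seeds, blocks in loader:
        pass  # the model's forward/backward pass would go here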
Converting to homogeneous graph takes 0.002s, peak mem: 1.841 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.841 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.841 GB [05:36:03] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.003 seconds, peak memory: 1.841 GB Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.841 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.004 seconds Splitting the graph into partitions takes 0.008s, peak mem: 1.841 GB part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.028 seconds, peak memory: 1.841 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp2-g0] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:36:05] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:05] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:29782]... PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp2-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp3-g0] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:36:06] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:06] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:29785]... PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp3-g1] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:36:07] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:07] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:29788]... PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp4-g0] [05:36:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp4-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp5-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp5-g1] PASSED [ 20%] tests/compute/test_sampling.py::test_non_uniform_random_walk[True] SKIPPED [ 20%] tests/compute/test_sampling.py::test_non_uniform_random_walk[False] PASSED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[True] SKIPPED [ 20%] tests/compute/test_sampling.py::test_uniform_random_walk[False] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp0-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp0-g1] Client [2559] waits on 172.17.0.3:41249 Machine (0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp1-g0] PASSED [ 19%] tests/go/test_pipeline.py::test_nodepred_data[csv] Client[0] in group[0] is exiting... PASSED [ 20%] tests/compute/test_sampling.py::test_node2vec PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp1-g1] PASSED [ 20%] tests/compute/test_sampling.py::test_pack_traces PASSED [ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[True] SKIPPED [ 20%] tests/compute/test_sampling.py::test_pinsage_sampling[False] Server (1) shutdown. Server is exiting... Server (0) shutdown. Server is exiting... Server (2) shutdown. Server is exiting... PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp2-g0] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_noprob PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp2-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g1] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_prob PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp4-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp4-g1] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_outedge PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp5-g0] PASSED [ 20%] tests/go/test_pipeline.py::test_nodepred_data[reddit] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp5-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp0-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp0-g1] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_topk_outedge PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_with_0deg PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_biased_homogeneous PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp1-g0] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_biased_bipartite PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp1-g1] PASSED [ 20%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-coo] PASSED [ 20%]
tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csr] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-in-csc] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-coo] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csr] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[False-out-csc] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp2-g0] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-coo] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csr] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-in-csc] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-coo] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csr] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_homogeneous[True-out-csc] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp2-g1] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csr] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[in-csc] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csr] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_etype_sorted_homogeneous[out-csc] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_heteroG[int64] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int32] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp3-g0] PASSED [ 21%] tests/compute/test_sampling.py::test_sample_neighbors_exclude_edges_homoG[int64] PASSED [ 21%] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32] PASSED [ 21%] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64] PASSED [ 21%] tests/compute/test_serialize.py::test_graph_serialize_with_feature[True] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp3-g1] PASSED [ 21%] tests/compute/test_serialize.py::test_graph_serialize_with_feature[False] PASSED [ 21%] tests/compute/test_serialize.py::test_graph_serialize_without_feature[True] PASSED [ 21%] tests/compute/test_serialize.py::test_graph_serialize_without_feature[False] PASSED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_labels[True] PASSED [ 22%] tests/go/test_pipeline.py::test_nodepred_data[co-buy-computer] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp4-g0] PASSED [ 22%] tests/compute/test_serialize.py::test_graph_serialize_with_labels[False] PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_tensors PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_empty_dict PASSED [ 22%] tests/compute/test_serialize.py::test_load_old_files1 PASSED [ 22%] tests/compute/test_serialize.py::test_load_old_files2 PASSED [ 22%] tests/compute/test_serialize.py::test_deserialize_old_heterograph_file PASSED [ 22%] tests/compute/test_serialize.py::test_serialize_heterograph PASSED [ 22%] 
tests/compute/test_serialize.py::test_serialize_heterograph_s3 SKIPPED [ 22%] tests/compute/test_shared_mem.py::test_single_process[int32] PASSED [ 22%] tests/compute/test_shared_mem.py::test_single_process[int64] PASSED [ 22%] tests/compute/test_shared_mem.py::test_multi_process[int32] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp4-g1] PASSED [ 22%] tests/compute/test_shared_mem.py::test_multi_process[int64] PASSED [ 22%] tests/compute/test_shared_mem.py::test_copy_from_gpu SKIPPED (Need g...) [ 22%] tests/compute/test_sort.py::test_sort_with_tag[int32] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp5-g0] PASSED [ 22%] tests/compute/test_sort.py::test_sort_with_tag[int64] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp5-g1] PASSED [ 22%] tests/compute/test_sort.py::test_sort_with_tag_bipartite[int32] PASSED [ 22%] tests/compute/test_sort.py::test_sort_with_tag_bipartite[int64] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp0-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp0-g0] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp0-g1] Converting to homogeneous graph takes 0.011s, peak mem: 1.841 GB Convert a graph into a bidirected graph: 0.019 seconds, peak memory: 1.841 GB Construct multi-constraint weights: 0.001 seconds, peak memory: 1.841 GB [05:36:28] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 30030 nodes and 601200 edges into 3 parts and get 152443 edge cuts Metis partitioning: 0.185 seconds, peak memory: 1.841 GB Assigning nodes to METIS partitions takes 0.205s, peak mem: 1.841 GB Reshuffle nodes and edges: 0.021 seconds Split the graph: 0.055 seconds Construct subgraphs: 0.048 seconds Splitting the graph into partitions takes 0.124s, peak mem: 1.841 GB part 0 has 9953 nodes of type n1 and 3136 are inside the partition part 0 has 9963 nodes of type n2 and 3140 are inside the partition part 0 has 9959 nodes of type n3 and 3142 are inside the partition part 0 has 47353 edges of type r1 and 31289 are inside the partition part 0 has 47698 edges of type r2 and 31241 are inside the partition part 0 has 47467 edges of type r3 and 31243 are inside the partition part 1 has 9968 nodes of type n1 and 3432 are inside the partition part 1 has 9983 nodes of type n2 and 3435 are inside the partition part 1 has 9990 nodes of type n3 and 3440 are inside the partition part 1 has 51615 edges of type r1 and 34407 are inside the partition part 1 has 51895 edges of type r2 and 34446 are inside the partition part 1 has 51968 edges of type r3 and 34664 are inside the partition part 2 has 9975 nodes of type n1 and 3432 are inside the partition part 2 has 9988 nodes of type n2 and 3435 are inside the partition part 2 has 9983 nodes of type n3 and 3438 are inside the partition part 2 has 51626 edges of type r1 and 34404 are inside the partition part 2 has 51781 edges of type r2 and 34513 are inside the partition part 2 has 51640 edges of type r3 and 34393 are inside the partition Save partitions: 0.033 seconds, peak memory: 1.841 GB There are 300600 edges in the graph and 0 edge cuts for 3 partitions. 
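In the partition stats above, "X nodes ... and Y are inside the partition" separates nodes owned by a part from the HALO copies replicated across part boundaries. A sketch of inspecting one saved part (the JSON path is a placeholder, and the exact return tuple of load_partition varies across DGL versions):

    import dgl

    # Load part 0 and count owned (inner) nodes versus HALO copies.
    part_g, node_feats, edge_feats, gpb, *rest = \
        dgl.distributed.load_partition('/tmp/parts/test_sampling.json', 0)
    inner = int(part_g.ndata['inner_node'].sum())
    print(part_g.num_nodes(), inner)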
PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp0-g1] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp1-g0] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp1-g0] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:36:29] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:29] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18667]... PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp2-g0] PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp2-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp1-g1] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:36:30] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:30] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18670]... PASSED [ 22%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp3-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp2-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp3-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp2-g1] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp4-g0] load test_sampling start graph service on server 2 for part 2 [05:36:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:18673]... PASSED [ 23%] tests/go/test_pipeline.py::test_nodepred_data[ogbn-arxiv] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp4-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp3-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp5-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp5-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp3-g1] [05:36:32] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:32] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
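The DGLWarning repeated throughout this run concerns how edge types are passed to DGL APIs: a bare etype string is deprecated in favor of the canonical (src_type, etype, dst_type) triple. A minimal illustration on a made-up heterograph:

    import dgl
    import torch

    g = dgl.heterograph({
        ('user', 'follows', 'user'): (torch.tensor([0, 1]), torch.tensor([1, 2])),
    })

    n_old = g.num_edges('follows')                    # deprecated 'str' format
    n_new = g.num_edges(('user', 'follows', 'user'))  # preferred canonical triple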
PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp0-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp4-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp0-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp4-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp1-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp5-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp2-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_rhs-shp5-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp2-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp0-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp3-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp3-g1] Client [2599] waits on 172.17.0.3:59965 Machine (0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp0-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp4-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp4-g1] Client[0] in group[0] is exiting... PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp1-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp5-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-sub-shp5-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp1-g1] Server (2) shutdown. Server is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp0-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp2-g0] PASSED [ 52%] tests/distributed/test_mp_dataloader.py::test_dataloader[edge-4-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files.
Converting to homogeneous graph takes 0.001s, peak mem: 1.841 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.841 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.841 GB [05:36:37] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.003 seconds, peak memory: 1.841 GB Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.841 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.004 seconds Splitting the graph into partitions takes 0.008s, peak mem: 1.841 GB PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp0-g1] part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.019 seconds, peak memory: 1.841 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. PASSED [ 24%] tests/go/test_pipeline.py::test_nodepred_data[ogbn-products] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp2-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp1-g0] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp1-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp3-g0] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:36:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:11423]... PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp3-g1] PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp2-g1] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:36:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:11426]... PASSED [ 23%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp3-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp3-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp4-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp4-g1] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. 
"Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:36:40] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:40] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:11429]... PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp4-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp4-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp5-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp5-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp5-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-add-shp5-g1] [05:36:41] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:41] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp0-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp0-g0] [[05:36:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc05:36:42:] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc140:: Sender with NetType~140socket: is created.Sender with NetType~ socket is created. [05:36:42] [/root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc05:36:42:] 159/root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc: :Receiver with NetType~159socket: is created.Receiver with NetType~ socket is created. [05:36:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:42] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp0-g1] [05:36:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:36:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp0-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp1-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp2-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp2-g1] PASSED [ 25%] tests/go/test_pipeline.py::test_nodepred_model[gcn] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp1-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp3-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp3-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp2-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp4-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp4-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp2-g1] Client [2635] waits on 172.17.0.3:43935 Client [2636] waits on 172.17.0.3:34133 Client [2637] waits on 172.17.0.3:58657 Client [2629] waits on 172.17.0.3:55007 Client [2638] waits on 172.17.0.3:46993 Machine (0) group (0) client (1) connect to server successfully! Machine (0) group (0) client (0) connect to server successfully! Machine (0) group (0) client (2) connect to server successfully! Machine (0) group (0) client (4) connect to server successfully! Machine (0) group (0) client (3) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp5-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp3-g0] Client[3] in group[0] is exiting... Client[1] in group[0] is exiting... PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-div-shp5-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp3-g1] Client[0] in group[0] is exiting... PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp0-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp4-g0] Client[4] in group[0] is exiting... PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp0-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp4-g1] Client[2] in group[0] is exiting... PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp1-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp1-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp5-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp5-g1] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp2-g1] Server (2) shutdown. Server is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting...
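The server/client lifecycle interleaved above (servers load the 'test_sampling' partitions and listen, clients connect, sample, then clients exit and servers shut down) is DGL's distributed sampling setup. A rough client-side sketch under assumed paths; ip_config.txt and the partition JSON location are illustrative, not taken from the test code:

    import dgl
    import torch

    # Join the cluster described by ip_config.txt (one line per server machine).
    dgl.distributed.initialize('ip_config.txt')
    g = dgl.distributed.DistGraph(
        'test_sampling', part_config='/tmp/test_sampling/test_sampling.json')

    # Sample 2 in-neighbors of a few seed nodes from the partitioned graph.
    frontier = dgl.distributed.sample_neighbors(g, torch.arange(4), fanout=2)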
PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp0-g0] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp3-g0] PASSED [ 27%] tests/go/test_pipeline.py::test_nodepred_model[gat] PASSED [ 24%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp3-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp0-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp4-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp4-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp5-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp5-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp0-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp0-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp1-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp1-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp2-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp2-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp2-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp2-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp3-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp3-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp4-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp4-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp5-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-sum-copy_rhs-shp5-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp4-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp0-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp1-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp4-g1] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp1-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp5-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp2-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp2-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp5-g1] PASSED [ 28%] tests/go/test_pipeline.py::test_nodepred_model[sage] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp3-g0] PASSED [ 25%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp3-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp0-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp4-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp0-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp4-g1] PASSED [ 26%] 
tests/compute/test_sparse.py::test_spmm[int32-min-add-shp5-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-add-shp5-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp1-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp1-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp0-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp1-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp1-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp2-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp2-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp3-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp3-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp3-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp3-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp4-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp4-g0] PASSED [ 29%] tests/go/test_pipeline.py::test_nodepred_model[sgc] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp4-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp4-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp5-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp5-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp5-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp5-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp0-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp0-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp1-g0] Converting to homogeneous graph takes 0.008s, peak mem: 1.841 GB Convert a graph into a bidirected graph: 0.017 seconds, peak memory: 1.841 GB Construct multi-constraint weights: 0.001 seconds, peak memory: 1.841 GB [05:37:06] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 30030 nodes and 601200 edges into 3 parts and get 152327 edge cuts Metis partitioning: 0.178 seconds, peak memory: 1.841 GB Assigning nodes to METIS partitions takes 0.197s, peak mem: 1.841 GB Reshuffle nodes and edges: 0.019 seconds Split the graph: 0.054 seconds Construct subgraphs: 0.048 seconds Splitting the graph into partitions takes 0.121s, peak mem: 1.841 GB part 0 has 9943 nodes of type n1 and 3134 are inside the partition part 0 has 9950 nodes of type n2 and 3140 are inside the partition part 0 has 9967 nodes of type n3 and 3142 are inside the partition part 0 has 47569 edges of type r1 and 31233 are inside the partition part 0 has 47527 edges of type r2 and 31191 are inside the partition part 0 has 47507 edges of type r3 and 31390 are inside the partition part 1 has 9977 nodes of type n1 and 3433 are inside the partition part 1 has 9976 nodes of type n2 and 3434 are inside the partition part 1 has 9993 nodes of type n3 and 3438 are inside the partition part 
1 has 51495 edges of type r1 and 34423 are inside the partition part 1 has 51685 edges of type r2 and 34458 are inside the partition part 1 has 51892 edges of type r3 and 34467 are inside the partition part 2 has 9967 nodes of type n1 and 3433 are inside the partition part 2 has 9984 nodes of type n2 and 3436 are inside the partition part 2 has 9990 nodes of type n3 and 3440 are inside the partition part 2 has 51628 edges of type r1 and 34444 are inside the partition part 2 has 51884 edges of type r2 and 34551 are inside the partition part 2 has 51740 edges of type r3 and 34443 are inside the partition Save partitions: 0.041 seconds, peak memory: 1.841 GB There are 300600 edges in the graph and 0 edge cuts for 3 partitions. server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:37:07] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:07] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:25054]... PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp1-g1] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:37:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:25057]... PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp0-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp2-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp1-g0] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:37:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:09] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:25060]... PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp3-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp1-g1] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp3-g1] [05:37:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp2-g0] PASSED [ 26%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp4-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp4-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp2-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp5-g0] [05:37:11] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:11] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:37:11] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:11] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:37:11] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:11] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp5-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp3-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp0-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp3-g1] PASSED [ 30%] tests/go/test_pipeline.py::test_nodepred_model[gin] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp1-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp4-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp1-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp4-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp2-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp2-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_lhs-shp5-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp3-g0] Client [2751] waits on 172.17.0.3:59413 Client [2754] waits on 172.17.0.3:59931 Client [2747] waits on 172.17.0.3:43589 Client [2753] waits on 172.17.0.3:40831 Client [2752] waits on 172.17.0.3:51987 Machine (0) group (0) client (0) connect to server successfully! Machine (0) group (0) client (1) connect to server successfully! Machine (0) group (0) client (2) connect to server successfully! Machine (0) group (0) client (3) connect to server successfully! Machine (0) group (0) client (4) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp3-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp4-g1] Client[1] in group[0] is exiting...
Client[3] in group[0] is exiting... PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp0-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp5-g0] Client[2] in group[0] is exiting... PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-div-shp5-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp1-g0] Client[0] in group[0] is exiting... PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp0-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp0-g1] Client[4] in group[0] is exiting... PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp1-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp1-g0] Server (0) shutdown. Server is exiting... PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp1-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp2-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp2-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp2-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp2-g1] Server (1) shutdown. Server is exiting... Server (2) shutdown. Server is exiting... PASSED [ 32%] tests/go/test_pipeline.py::test_nodepred_ns_data[cora] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp3-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp3-g0] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp3-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp3-g1] PASSED [ 27%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp4-g0] PASSED [ 54%] tests/distributed/test_mp_dataloader.py::test_neg_dataloader[0-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files. Converting to homogeneous graph takes 0.001s, peak mem: 1.841 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.841 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.841 GB [05:37:20] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.002 seconds, peak memory: 1.841 GB Assigning nodes to METIS partitions takes 0.003s, peak mem: 1.841 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.003 seconds Splitting the graph into partitions takes 0.006s, peak mem: 1.841 GB part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.017 seconds, peak memory: 1.841 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions.
PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp4-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp5-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_lhs-shp5-g1] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:37:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:27571]... PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp4-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp0-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp0-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp5-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp1-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp1-g1] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:37:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:27574]... PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-min-copy_rhs-shp5-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp2-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp2-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp0-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp3-g0] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:37:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:27577]... PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp3-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp0-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp4-g0] [05:37:24] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:24] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp4-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp5-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp1-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-min-copy_rhs-shp5-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp1-g1] PASSED [ 33%] tests/go/test_pipeline.py::test_nodepred_ns_data[citeseer] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp0-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp0-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp2-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp1-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp1-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp2-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp2-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp3-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp2-g1] Client [2853] waits on 172.17.0.3:50351 Machine (0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp3-g0] Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... Server (2) shutdown. Server is exiting... PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp3-g1] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp3-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp4-g0] PASSED [ 28%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp4-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp4-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp5-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-add-shp5-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp5-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp0-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-add-shp5-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp0-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp0-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp1-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp0-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp1-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp2-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp1-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp2-g1] PASSED [ 34%] tests/go/test_pipeline.py::test_nodepred_ns_data[pubmed] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp1-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp3-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp3-g1] PASSED [ 
29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp2-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp4-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp4-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp2-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp5-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp5-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp3-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp0-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp3-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp0-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp4-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp1-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp4-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp1-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp2-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp2-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp5-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp3-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp5-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp3-g1] PASSED [ 35%] tests/go/test_pipeline.py::test_nodepred_ns_data[csv] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp0-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp4-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp4-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp0-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp5-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp1-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp5-g1] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp1-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp0-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp0-g1] Converting to homogeneous graph takes 0.010s, peak mem: 1.841 GB Convert a graph into a bidirected graph: 0.017 seconds, peak memory: 1.841 GB Construct multi-constraint weights: 0.001 seconds, peak memory: 1.841 GB PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp2-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp1-g0] [05:37:43] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 30030 nodes and 601200 edges into 3 parts and get 150005 edge cuts Metis partitioning: 0.205 seconds, peak memory: 1.841 GB Assigning nodes to METIS partitions takes 0.224s, peak mem: 1.841 GB Reshuffle nodes and edges: 0.018 seconds Split the graph: 0.053 seconds Construct subgraphs: 0.047 seconds Splitting the graph into partitions takes 0.118s, peak mem: 1.841 GB part 0 has 9954 nodes of type n1 and 3134 are inside the partition part 0 has 9953 nodes of type n2 and 3138 are inside the partition part 0 has 9962 nodes of type n3 and 3141 are inside the partition part 0 has 47034 edges of type r1 and 31192 are inside the partition part 0 has 47153 edges of type 
r2 and 31126 are inside the partition part 0 has 47225 edges of type r3 and 31379 are inside the partition part 1 has 9978 nodes of type n1 and 3433 are inside the partition part 1 has 9976 nodes of type n2 and 3436 are inside the partition part 1 has 9984 nodes of type n3 and 3439 are inside the partition part 1 has 51442 edges of type r1 and 34436 are inside the partition part 1 has 51701 edges of type r2 and 34684 are inside the partition part 1 has 51404 edges of type r3 and 34408 are inside the partition part 2 has 9964 nodes of type n1 and 3433 are inside the partition part 2 has 9974 nodes of type n2 and 3436 are inside the partition part 2 has 9981 nodes of type n3 and 3440 are inside the partition part 2 has 51573 edges of type r1 and 34472 are inside the partition part 2 has 51494 edges of type r2 and 34390 are inside the partition part 2 has 51579 edges of type r3 and 34513 are inside the partition Save partitions: 0.035 seconds, peak memory: 1.841 GB There are 300600 edges in the graph and 0 edge cuts for 3 partitions. PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp2-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp1-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp2-g0] PASSED [ 29%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp3-g0] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:37:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:14090]... PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp2-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp3-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp3-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp3-g1] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:37:45] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:45] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:14093]... PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp4-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp4-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp4-g1] server: #clients=1 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:37:46] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. 
[05:37:46] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:14096]... PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp4-g1] PASSED [ 37%] tests/go/test_pipeline.py::test_nodepred_ns_data[reddit] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp5-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp5-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-div-shp5-g1] [05:37:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:47] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-mul-shp5-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp0-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp0-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp0-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp1-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp0-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp1-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp2-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp2-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp1-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp3-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp1-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp3-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp4-g0] Client [2893] waits on 172.17.0.3:47041 Machine (0) group (0) client (0) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp2-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp4-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp2-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp5-g0] Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Server (2) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_lhs-shp5-g1] PASSED [ 56%] tests/distributed/test_mp_dataloader.py::test_neg_dataloader[4-3] NumNodes: 2708 NumEdges: 10556 NumFeats: 1433 NumClasses: 7 NumTrainingSamples: 140 NumValidationSamples: 500 NumTestSamples: 1000 Done loading data from cached files.
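test_neg_dataloader above drives edge-wise distributed dataloading with negative sampling. A hedged sketch of that pattern, assuming a DistGraph g has already been set up as in the earlier client-side sketch; the fanouts, negative-sample count, and batch size below are made up:

    import dgl
    import torch

    # 2-layer neighbor sampling plus 3 uniform negatives per positive edge.
    sampler = dgl.dataloading.MultiLayerNeighborSampler([5, 5])
    neg_sampler = dgl.dataloading.negative_sampler.Uniform(3)
    dataloader = dgl.dataloading.DistEdgeDataLoader(
        g, torch.arange(g.num_edges()), sampler,
        negative_sampler=neg_sampler, batch_size=32, shuffle=True)

    for input_nodes, pos_graph, neg_graph, blocks in dataloader:
        pass  # model forward/backward would go here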
Converting to homogeneous graph takes 0.001s, peak mem: 1.841 GB Convert a graph into a bidirected graph: 0.001 seconds, peak memory: 1.841 GB Construct multi-constraint weights: 0.000 seconds, peak memory: 1.841 GB [05:37:52] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 2708 nodes and 10556 edges into 3 parts and get 325 edge cuts Metis partitioning: 0.003 seconds, peak memory: 1.841 GB Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.841 GB Reshuffle nodes and edges: 0.001 seconds Split the graph: 0.002 seconds Construct subgraphs: 0.004 seconds Splitting the graph into partitions takes 0.007s, peak mem: 1.841 GB part 0 has 1046 nodes and 903 are inside the partition part 0 has 3490 edges and 3317 are inside the partition part 1 has 1064 nodes and 902 are inside the partition part 1 has 4006 edges and 3739 are inside the partition part 2 has 1056 nodes and 903 are inside the partition part 2 has 3710 edges and 3500 are inside the partition Save partitions: 0.020 seconds, peak memory: 1.841 GB There are 10556 edges in the graph and 0 edge cuts for 3 partitions. PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp3-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp0-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp0-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp1-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp3-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp1-g1] PASSED [ 38%] tests/go/test_pipeline.py::test_nodepred_ns_data[co-buy-computer] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 0 for part 0 [05:37:53] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:53] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:22403]... PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp2-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp4-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp2-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp3-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp4-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp3-g1] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 1 for part 1 [05:37:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:22406]... 
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp4-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp4-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp5-g0] server: #clients=5 /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") load test_sampling start graph service on server 2 for part 2 [05:37:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:55] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:22409]... PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-div-shp5-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp5-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int32-max-copy_rhs-shp5-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp0-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp0-g1] [05:37:56] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:56] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp0-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp1-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp0-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp1-g1] [05:37:57] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:57] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:37:57] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:57] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:37:57] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:57] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:37:57] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:37:57] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp1-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp2-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp2-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp1-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp3-g0] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp2-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp3-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp2-g1] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp4-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp4-g1] PASSED [ 30%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp3-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp5-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp5-g1] Client [2928] waits on 172.17.0.3:38919 Client [2930] waits on 172.17.0.3:45477 Client [2923] waits on 172.17.0.3:56727 Client [2929] waits on 172.17.0.3:54889 Client [2927] waits on 172.17.0.3:41805 Machine (0) group (0) client (3) connect to server successfully! Machine (0) group (0) client (0) connect to server successfully! Machine (0) group (0) client (2) connect to server successfully! Machine (0) group (0) client (1) connect to server successfully! Machine (0) group (0) client (4) connect to server successfully! /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'. "Etype with 'str' format is deprecated. Please use '(str, str, str)'.") PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp3-g1] Client[4] in group[0] is exiting... Client[1] in group[0] is exiting... PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp0-g0] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp4-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp0-g1] Client[0] in group[0] is exiting... PASSED [ 39%] tests/go/test_pipeline.py::test_nodepred_ns_data[ogbn-arxiv] PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp4-g1] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp1-g0] Client[3] in group[0] is exiting... PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp1-g1] Client[2] in group[0] is exiting... PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp5-g0] PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp2-g0] Server (2) shutdown. Server is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting...
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp2-g1]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_lhs-shp5-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp3-g0]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp0-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp3-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp4-g0]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp0-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp4-g1]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp5-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-sub-shp5-g1]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp0-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp0-g1]
PASSED [ 40%] tests/go/test_pipeline.py::test_nodepred_ns_data[ogbn-products]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp1-g0]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp2-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp1-g1]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp2-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp2-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp2-g1]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp3-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp3-g0]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp3-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp3-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp4-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp4-g1]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp4-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp5-g0]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp4-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp5-g1]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp5-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp0-g0]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp5-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp0-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp1-g0]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp0-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp1-g1]
PASSED [ 41%] tests/go/test_pipeline.py::test_nodepred_ns_model[gcn]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp0-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp2-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp2-g1]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp1-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp3-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp3-g1]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp1-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp4-g0]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp2-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp4-g1]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp2-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp5-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp5-g1]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp3-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp0-g0]
PASSED [ 31%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp3-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp0-g1]
Converting to homogeneous graph takes 0.008s, peak mem: 1.841 GB
Convert a graph into a bidirected graph: 0.017 seconds, peak memory: 1.841 GB
Construct multi-constraint weights: 0.001 seconds, peak memory: 1.841 GB
[05:38:16] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 30030 nodes and 601200 edges into 3 parts and get 150005 edge cuts
Metis partitioning: 0.198 seconds, peak memory: 1.841 GB
Assigning nodes to METIS partitions takes 0.216s, peak mem: 1.841 GB
Reshuffle nodes and edges: 0.016 seconds
Split the graph: 0.045 seconds
Construct subgraphs: 0.048 seconds
Splitting the graph into partitions takes 0.109s, peak mem: 1.841 GB
part 0 has 9954 nodes of type n1 and 3134 are inside the partition
part 0 has 9953 nodes of type n2 and 3138 are inside the partition
part 0 has 9962 nodes of type n3 and 3141 are inside the partition
part 0 has 47034 edges of type r1 and 31192 are inside the partition
part 0 has 47153 edges of type r2 and 31126 are inside the partition
part 0 has 47225 edges of type r3 and 31379 are inside the partition
part 1 has 9978 nodes of type n1 and 3433 are inside the partition
part 1 has 9976 nodes of type n2 and 3436 are inside the partition
part 1 has 9984 nodes of type n3 and 3439 are inside the partition
part 1 has 51442 edges of type r1 and 34436 are inside the partition
part 1 has 51701 edges of type r2 and 34684 are inside the partition
part 1 has 51404 edges of type r3 and 34408 are inside the partition
part 2 has 9964 nodes of type n1 and 3433 are inside the partition
part 2 has 9974 nodes of type n2 and 3436 are inside the partition
part 2 has 9981 nodes of type n3 and 3440 are inside the partition
part 2 has 51573 edges of type r1 and 34472 are inside the partition
part 2 has 51494 edges of type r2 and 34390 are inside the partition
part 2 has 51579 edges of type r3 and 34513 are inside the partition
Save partitions: 0.037 seconds, peak memory: 1.841 GB
There are 300600 edges in the graph and 0 edge cuts for 3 partitions.
server: #clients=5
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
load test_sampling
start graph service on server 0 for part 0
[05:38:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:12582]...
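Note: the METIS output above ("Partition a graph with 30030 nodes ... into 3 parts", the per-part node/edge counts, and "load test_sampling") is what dgl.distributed.partition_graph prints while splitting a heterograph for the distributed tests. A minimal sketch of roughly that call, under the assumption that the test issues something like it; the graph construction and output path below are illustrative, not copied from the test:

    import dgl
    import torch

    num = 1000
    # Toy heterograph echoing the n1..n3 / r1..r3 type names seen in the log.
    g = dgl.heterograph({
        ('n1', 'r1', 'n2'): (torch.randint(0, num, (num,)), torch.randint(0, num, (num,))),
        ('n2', 'r2', 'n3'): (torch.randint(0, num, (num,)), torch.randint(0, num, (num,))),
        ('n3', 'r3', 'n1'): (torch.randint(0, num, (num,)), torch.randint(0, num, (num,))),
    })

    # METIS-based split into 3 parts, registered under the name 'test_sampling'
    # as in the "load test_sampling" lines above.
    dgl.distributed.partition_graph(
        g, graph_name='test_sampling', num_parts=3,
        out_path='/tmp/partitions', part_method='metis')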
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp4-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp4-g1]
server: #clients=5
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
load test_sampling
start graph service on server 1 for part 1
[05:38:19] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:19] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:12585]...
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp5-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp1-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp1-g1]
PASSED [ 43%] tests/go/test_pipeline.py::test_nodepred_ns_model[gat]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp5-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp2-g0]
server: #clients=5
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
load test_sampling
start graph service on server 2 for part 2
[05:38:20] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:20] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:12588]...
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp2-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp0-g0]
[05:38:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp3-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp0-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp3-g1]
[05:38:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
[05:38:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp1-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp4-g0]
[05:38:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
[05:38:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp4-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp5-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp5-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp1-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp0-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp0-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp2-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp1-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp1-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp2-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp2-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp2-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp3-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp3-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp3-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp4-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp3-g1]
Client [3043] waits on 172.17.0.3:38427
Client [3044] waits on 172.17.0.3:59635
Client [3045] waits on 172.17.0.3:44859
Client [3046] waits on 172.17.0.3:46803
Client [3037] waits on 172.17.0.3:39485
Machine (0) group (0) client (0) connect to server successfully!
Machine (0) group (0) client (4) connect to server successfully!
Machine (0) group (0) client (3) connect to server successfully!
Machine (0) group (0) client (2) connect to server successfully!
Machine (0) group (0) client (1) connect to server successfully!
/root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
  "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp4-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp5-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp4-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-sum-copy_rhs-shp5-g1]
Client[1] in group[0] is exiting...
Client[0] in group[0] is exiting...
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp0-g0]
Client[4] in group[0] is exiting...
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp4-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp0-g1]
Client[2] in group[0] is exiting...
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp5-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp1-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp1-g1]
Client[3] in group[0] is exiting...
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-sub-shp5-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp2-g0]
Server (0) shutdown.
Server is exiting...
Server (1) shutdown.
Server is exiting...
PASSED [ 44%] tests/go/test_pipeline.py::test_nodepred_ns_model[sage]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp0-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp2-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp0-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp3-g0]
Server (2) shutdown.
Server is exiting...
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp3-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp1-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp4-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp1-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp4-g1]
PASSED [ 58%] tests/distributed/test_new_kvstore.py::test_partition_policy
PASSED [ 60%] tests/distributed/test_new_kvstore.py::test_kv_store
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp2-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp5-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-add-shp5-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp0-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp0-g1]
/root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
  dgl_warning('Recommend creating graphs by `dgl.graph(data)`'
/root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
  dgl_warning('Recommend creating graphs by `dgl.graph(data)`'
Sleep 5 seconds to test client re-connect.
Sleep 5 seconds to test client re-connect.
/root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
  dgl_warning('Recommend creating graphs by `dgl.graph(data)`'
[05:38:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
/root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
  dgl_warning('Recommend creating graphs by `dgl.graph(data)`'
[05:38:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
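Note: the heterograph.py:72 DGLWarning repeated above says exactly what to change: construct graphs with dgl.graph(data) rather than the deprecated dgl.DGLGraph(data) constructor. A short illustration (toy tensors):

    import dgl
    import torch

    src = torch.tensor([0, 1, 2])
    dst = torch.tensor([1, 2, 0])

    # Deprecated: dgl.DGLGraph((src, dst)) -- this is what triggers the warning.
    # Recommended replacement:
    g = dgl.graph((src, dst))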
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp2-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp1-g0]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp3-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp1-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp3-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp2-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp2-g1]
PASSED [ 32%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp4-g0]
PASSED [ 45%] tests/go/test_pipeline.py::test_linkpred_data[cora]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp3-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp4-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp3-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp5-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp4-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp4-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-mul-shp5-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp5-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp0-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-sub-shp5-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp0-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp0-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp0-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp1-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp1-g0]
[05:38:36] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:36] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:28089]...
[05:38:36] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:36] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:28090]...
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp1-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp1-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp2-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp2-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp2-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp3-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp2-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp3-g1]
PASSED [ 46%] tests/go/test_pipeline.py::test_linkpred_data[citeseer]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp3-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp4-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp4-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp3-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp5-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp4-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp5-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp4-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp0-g0]
Client [3137] waits on 172.17.0.3:54393
Client [3136] waits on 172.17.0.3:41255
Machine (0) group (0) client (1) connect to server successfully!
Machine (0) group (0) client (0) connect to server successfully!
['data_2', 'data_0_1', 'data_0_3', 'data_1', 'data_0', 'data_0_2']
['data_0', 'data_1', 'data_0_2', 'data_0_3', 'data_2', 'data_0_1']
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp0-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp5-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp1-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-div-shp5-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp1-g1]
add...
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp0-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp2-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp2-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp0-g1]
add...
Client[0] in group[0] is exiting...
Client[1] in group[0] is exiting...
Server (0) shutdown.
Server is exiting...
Server (1) shutdown.
Server is exiting...
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp3-g0]
PASSED [ 61%] tests/distributed/test_new_kvstore.py::test_kv_multi_role
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp3-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp1-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp4-g0]
/root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
  dgl_warning('Recommend creating graphs by `dgl.graph(data)`'
[05:38:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
/root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
  dgl_warning('Recommend creating graphs by `dgl.graph(data)`'
[05:38:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:19607]...
/root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
  dgl_warning('Recommend creating graphs by `dgl.graph(data)`'
PASSED [ 48%] tests/go/test_pipeline.py::test_linkpred_data[pubmed]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp4-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp1-g1]
[05:38:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
/root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
  dgl_warning('Recommend creating graphs by `dgl.graph(data)`'
[05:38:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:43] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:19606]...
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp5-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp2-g0]
[05:38:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
[05:38:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-div-shp5-g1]
[05:38:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
[05:38:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:44] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp2-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp0-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp0-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp3-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp1-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp1-g1]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp3-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp2-g0]
PASSED [ 33%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp4-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp2-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp4-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp3-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp3-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp5-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp4-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_lhs-shp5-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp4-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp0-g0]
PASSED [ 49%] tests/go/test_pipeline.py::test_linkpred_data[csv]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp5-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_lhs-shp5-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp0-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp0-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp0-g1]
Client [3184] waits on 172.17.0.3:60381
Client [3191] waits on 172.17.0.3:52669
Client [3183] waits on 172.17.0.3:36033
Client [3173] waits on 172.17.0.3:47463
Client [3172] waits on 172.17.0.3:47927
Client [3190] waits on 172.17.0.3:40983
Machine (0) group (0) client (0) connect to server successfully!
Machine (0) group (0) client (1) connect to server successfully!
Machine (0) group (0) client (2) connect to server successfully!
Machine (0) group (0) client (3) connect to server successfully!
Machine (0) group (0) client (4) connect to server successfully!
Machine (0) group (0) client (5) connect to server successfully!
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp1-g0]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp1-g0]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp1-g1]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp2-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp1-g1]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp2-g1]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp3-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp2-g0]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp3-g1]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp4-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp2-g1]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp4-g1]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp5-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp3-g0]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-min-copy_rhs-shp5-g1]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp0-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp3-g1]
PASSED [ 50%] tests/go/test_pipeline.py::test_linkpred_data[reddit]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp0-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp4-g0]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp1-g0]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp1-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp4-g1]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp2-g0]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp2-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp5-g0]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp3-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-sum-copy_rhs-shp5-g1]
[05:38:54] /root/jenkins/workspace/dgl_PR-4648/src/runtime/semaphore_wrapper.cc:83: sem_timedwait timed out after 5000 milliseconds.
[05:38:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/network/socket_communicator.cc:283: Timed out when trying to receive rpc meta data after 5000 milliseconds.
[05:38:54] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:84: Recv RPCMessage timeout in 5000 ms. Retrying ...
i: 0 role: default trainer rank: 1, global rank: 1
Client[3] in group[0] is exiting...
i: 1 role: default trainer rank: 0, global rank: 0
Client[2] in group[0] is exiting...
Client[0] in group[0] is exiting...
Client[1] in group[0] is exiting...
Client[5] in group[0] is exiting...
Client[4] in group[0] is exiting...
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp3-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp0-g0]
Server (0) shutdown.
Server is exiting...
Server (1) shutdown.
Server is exiting...
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp4-g0]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp4-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp0-g1]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp5-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp1-g0]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-add-shp5-g1]
PASSED [ 63%] tests/distributed/test_partition.py::test_partition
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp1-g1]
FAILED [ 65%] tests/distributed/test_partition.py::test_hetero_partition
Converting to homogeneous graph takes 0.001s, peak mem: 1.841 GB
Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.841 GB
Construct multi-constraint weights: 0.000 seconds, peak memory: 1.841 GB
[05:38:57] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 3000 nodes and 6000 edges into 2 parts and get 216 edge cuts
Metis partitioning: 0.003 seconds, peak memory: 1.841 GB
Assigning nodes to METIS partitions takes 0.004s, peak mem: 1.841 GB
Reshuffle nodes and edges: 0.001 seconds
Split the graph: 0.002 seconds
Construct subgraphs: 0.002 seconds
Splitting the graph into partitions takes 0.005s, peak mem: 1.841 GB
part 0 has 1142 nodes of type item and 1030 are inside the partition
part 0 has 607 nodes of type user and 514 are inside the partition
part 0 has 1693 edges of type like and 1575 are inside the partition
part 1 has 1064 nodes of type item and 970 are inside the partition
part 1 has 589 nodes of type user and 486 are inside the partition
part 1 has 1523 edges of type like and 1425 are inside the partition
Save partitions: 0.002 seconds, peak memory: 1.841 GB
There are 3000 edges in the graph and 0 edge cuts for 2 partitions.
Converting to homogeneous graph takes 0.001s, peak mem: 1.841 GB
Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.841 GB
Construct multi-constraint weights: 0.000 seconds, peak memory: 1.841 GB
[05:38:57] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 3000 nodes and 6000 edges into 8 parts and get 449 edge cuts
Metis partitioning: 0.007 seconds, peak memory: 1.841 GB
Assigning nodes to METIS partitions takes 0.009s, peak mem: 1.841 GB
Reshuffle nodes and edges: 0.001 seconds
Split the graph: 0.002 seconds
Construct subgraphs: 0.002 seconds
Splitting the graph into partitions takes 0.005s, peak mem: 1.841 GB
part 0 has 1096 nodes of type item and 998 are inside the partition
part 0 has 623 nodes of type user and 502 are inside the partition
part 0 has 1587 edges of type like and 1475 are inside the partition
part 1 has 1134 nodes of type item and 1002 are inside the partition
part 1 has 597 nodes of type user and 498 are inside the partition
part 1 has 1665 edges of type like and 1525 are inside the partition
Save partitions: 0.002 seconds, peak memory: 1.841 GB
There are 3000 edges in the graph and 0 edge cuts for 2 partitions.
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp0-g0]
Converting to homogeneous graph takes 0.001s, peak mem: 1.841 GB
Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.841 GB
Construct multi-constraint weights: 0.000 seconds, peak memory: 1.841 GB
[05:38:57] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 3030 nodes and 6120 edges into 4 parts and get 336 edge cuts
Metis partitioning: 0.004 seconds, peak memory: 1.841 GB
Assigning nodes to METIS partitions takes 0.005s, peak mem: 1.841 GB
Reshuffle nodes and edges: 0.001 seconds
Split the graph: 0.001 seconds
Construct subgraphs: 0.004 seconds
Splitting the graph into partitions takes 0.006s, peak mem: 1.841 GB
part 0 has 295 nodes of type n1 and 248 are inside the partition
part 0 has 306 nodes of type n2 and 252 are inside the partition
part 0 has 312 nodes of type n3 and 258 are inside the partition
part 0 has 284 edges of type r1 and 255 are inside the partition
part 0 has 265 edges of type r2 and 240 are inside the partition
part 0 has 275 edges of type r3 and 244 are inside the partition
part 1 has 316 nodes of type n1 and 257 are inside the partition
part 1 has 317 nodes of type n2 and 257 are inside the partition
part 1 has 321 nodes of type n3 and 262 are inside the partition
part 1 has 274 edges of type r1 and 247 are inside the partition
part 1 has 305 edges of type r2 and 277 are inside the partition
part 1 has 320 edges of type r3 and 287 are inside the partition
part 2 has 307 nodes of type n1 and 257 are inside the partition
part 2 has 315 nodes of type n2 and 259 are inside the partition
part 2 has 323 nodes of type n3 and 261 are inside the partition
part 2 has 272 edges of type r1 and 250 are inside the partition
part 2 has 325 edges of type r2 and 291 are inside the partition
part 2 has 289 edges of type r3 and 259 are inside the partition
part 3 has 288 nodes of type n1 and 238 are inside the partition
part 3 has 293 nodes of type n2 and 242 are inside the partition
part 3 has 288 nodes of type n3 and 239 are inside the partition
part 3 has 286 edges of type r1 and 258 are inside the partition
part 3 has 234 edges of type r2 and 212 are inside the partition
part 3 has 267 edges of type r3 and 240 are inside the partition
Save partitions: 0.005 seconds, peak memory: 1.841 GB
There are 3060 edges in the graph and 0 edge cuts for 4 partitions.
node n1: 1000, 1000
node n2: 1010, 1010
node n3: 1020, 1020
edge r1: 1010, 1010
edge r2: 1020, 1020
edge r3: 1030, 1030
Converting to homogeneous graph takes 0.001s, peak mem: 1.841 GB
Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.841 GB
Construct multi-constraint weights: 0.000 seconds, peak memory: 1.841 GB
[05:38:57] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 3030 nodes and 6120 edges into 8 parts and get 443 edge cuts
Metis partitioning: 0.008 seconds, peak memory: 1.841 GB
Assigning nodes to METIS partitions takes 0.010s, peak mem: 1.841 GB
Save partitions: 0.002 seconds, peak memory: 1.841 GB
There are 3060 edges in the graph and 0 edge cuts for 1 partitions.
node n1: 1000, 1000
node n2: 1010, 1010
node n3: 1020, 1020
edge r1: 1010, 1010
edge r2: 1020, 1020
edge r3: 1030, 1030
Converting to homogeneous graph takes 0.001s, peak mem: 1.841 GB
Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.841 GB
Construct multi-constraint weights: 0.000 seconds, peak memory: 1.841 GB
[05:38:57] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 3030 nodes and 6120 edges into 32 parts and get 794 edge cuts
Metis partitioning: 0.030 seconds, peak memory: 1.841 GB
Assigning nodes to METIS partitions takes 0.031s, peak mem: 1.841 GB
Reshuffle nodes and edges: 0.001 seconds
Split the graph: 0.002 seconds
Construct subgraphs: 0.004 seconds
Splitting the graph into partitions takes 0.007s, peak mem: 1.841 GB
part 0 has 329 nodes of type n1 and 248 are inside the partition
part 0 has 317 nodes of type n2 and 249 are inside the partition
part 0 has 334 nodes of type n3 and 255 are inside the partition
part 0 has 262 edges of type r1 and 227 are inside the partition
part 0 has 302 edges of type r2 and 264 are inside the partition
part 0 has 303 edges of type r3 and 257 are inside the partition
part 1 has 324 nodes of type n1 and 248 are inside the partition
part 1 has 343 nodes of type n2 and 252 are inside the partition
part 1 has 347 nodes of type n3 and 255 are inside the partition
part 1 has 310 edges of type r1 and 262 are inside the partition
part 1 has 321 edges of type r2 and 270 are inside the partition
part 1 has 316 edges of type r3 and 267 are inside the partition
part 2 has 335 nodes of type n1 and 256 are inside the partition
part 2 has 328 nodes of type n2 and 254 are inside the partition
part 2 has 352 nodes of type n3 and 255 are inside the partition
part 2 has 310 edges of type r1 and 277 are inside the partition
part 2 has 274 edges of type r2 and 227 are inside the partition
part 2 has 291 edges of type r3 and 238 are inside the partition
part 3 has 328 nodes of type n1 and 248 are inside the partition
part 3 has 339 nodes of type n2 and 255 are inside the partition
part 3 has 351 nodes of type n3 and 255 are inside the partition
part 3 has 283 edges of type r1 and 244 are inside the partition
part 3 has 314 edges of type r2 and 259 are inside the partition
part 3 has 320 edges of type r3 and 268 are inside the partition
Save partitions: 0.006 seconds, peak memory: 1.841 GB
There are 3060 edges in the graph and 0 edge cuts for 4 partitions.
node n1: 1000, 1000
node n2: 1010, 1010
node n3: 1020, 1020
edge r1: 1010, 1010
edge r2: 1020, 1020
edge r3: 1030, 1030
Converting to homogeneous graph takes 0.001s, peak mem: 1.841 GB
Reshuffle nodes and edges: 0.001 seconds
Split the graph: 0.002 seconds
Construct subgraphs: 0.004 seconds
Splitting the graph into partitions takes 0.007s, peak mem: 1.841 GB
part 0 has 531 nodes of type n1 and 238 are inside the partition
part 0 has 545 nodes of type n2 and 267 are inside the partition
part 0 has 563 nodes of type n3 and 244 are inside the partition
part 0 has 445 edges of type r1 and 272 are inside the partition
part 0 has 448 edges of type r2 and 240 are inside the partition
part 0 has 435 edges of type r3 and 230 are inside the partition
part 1 has 553 nodes of type n1 and 263 are inside the partition
part 1 has 566 nodes of type n2 and 251 are inside the partition
part 1 has 552 nodes of type n3 and 260 are inside the partition
part 1 has 432 edges of type r1 and 235 are inside the partition
part 1 has 452 edges of type r2 and 274 are inside the partition
part 1 has 469 edges of type r3 and 270 are inside the partition
part 2 has 545 nodes of type n1 and 263 are inside the partition
part 2 has 534 nodes of type n2 and 248 are inside the partition
part 2 has 557 nodes of type n3 and 255 are inside the partition
part 2 has 425 edges of type r1 and 250 are inside the partition
part 2 has 429 edges of type r2 and 233 are inside the partition
part 2 has 439 edges of type r3 and 247 are inside the partition
part 3 has 551 nodes of type n1 and 236 are inside the partition
part 3 has 559 nodes of type n2 and 244 are inside the partition
part 3 has 542 nodes of type n3 and 261 are inside the partition
part 3 has 449 edges of type r1 and 253 are inside the partition
part 3 has 453 edges of type r2 and 273 are inside the partition
part 3 has 463 edges of type r3 and 283 are inside the partition
Save partitions: 0.006 seconds, peak memory: 1.841 GB
There are 3060 edges in the graph and 0 edge cuts for 4 partitions.
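Note: the "node n1: 1000, 1000" style lines around here are the partition test comparing per-type node/edge counts before and after the split. Reading one part back for such a check could look like the sketch below; the JSON path is illustrative, and the 7-tuple return matches the DGL 0.8/0.9-era dgl.distributed.load_partition, so treat the unpacking as an assumption:

    import dgl

    # Load part 0 back from the config written by partition_graph().
    (subg, node_feats, edge_feats, gpb,
     graph_name, ntypes, etypes) = dgl.distributed.load_partition(
        '/tmp/partitions/test_sampling.json', 0)

    print(graph_name, ntypes, etypes)        # e.g. 'test_sampling', node/edge types
    print(subg.num_nodes(), subg.num_edges())  # local part sizes, incl. HALO nodes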
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp0-g1]
node n1: 1000, 1000
node n2: 1010, 1010
node n3: 1020, 1020
edge r1: 1010, 1010
edge r2: 1020, 1020
edge r3: 1030, 1030
Converting to homogeneous graph takes 0.001s, peak mem: 1.841 GB
Convert a graph into a bidirected graph: 0.000 seconds, peak memory: 1.841 GB
Construct multi-constraint weights: 0.000 seconds, peak memory: 1.841 GB
[05:38:58] /root/jenkins/workspace/dgl_PR-4648/src/graph/transform/metis_partition_hetero.cc:87: Partition a graph with 3030 nodes and 6120 edges into 32 parts and get 794 edge cuts
Metis partitioning: 0.026 seconds, peak memory: 1.841 GB
Assigning nodes to METIS partitions takes 0.028s, peak mem: 1.841 GB
Reshuffle nodes and edges: 0.001 seconds
Split the graph: 0.001 seconds
Construct subgraphs: 0.003 seconds
Splitting the graph into partitions takes 0.006s, peak mem: 1.841 GB
part 0 has 329 nodes of type n1 and 248 are inside the partition
part 0 has 317 nodes of type n2 and 249 are inside the partition
part 0 has 334 nodes of type n3 and 255 are inside the partition
part 0 has 262 edges of type r1 and 227 are inside the partition
part 0 has 302 edges of type r2 and 264 are inside the partition
part 0 has 303 edges of type r3 and 257 are inside the partition
part 1 has 324 nodes of type n1 and 248 are inside the partition
part 1 has 343 nodes of type n2 and 252 are inside the partition
part 1 has 347 nodes of type n3 and 255 are inside the partition
part 1 has 310 edges of type r1 and 262 are inside the partition
part 1 has 321 edges of type r2 and 270 are inside the partition
part 1 has 316 edges of type r3 and 267 are inside the partition
part 2 has 335 nodes of type n1 and 256 are inside the partition
part 2 has 328 nodes of type n2 and 254 are inside the partition
part 2 has 352 nodes of type n3 and 255 are inside the partition
part 2 has 310 edges of type r1 and 277 are inside the partition
part 2 has 274 edges of type r2 and 227 are inside the partition
part 2 has 291 edges of type r3 and 238 are inside the partition
part 3 has 328 nodes of type n1 and 248 are inside the partition
part 3 has 339 nodes of type n2 and 255 are inside the partition
part 3 has 351 nodes of type n3 and 255 are inside the partition
part 3 has 283 edges of type r1 and 244 are inside the partition
part 3 has 314 edges of type r2 and 259 are inside the partition
part 3 has 320 edges of type r3 and 268 are inside the partition
Save partitions: 0.006 seconds, peak memory: 1.841 GB
There are 3060 edges in the graph and 0 edge cuts for 4 partitions.
node n1: 1000, 1000
node n2: 1010, 1010
node n3: 1020, 1020
edge r1: 1010, 1010
edge r2: 1020, 1020
edge r3: 1030, 1030
PASSED [ 67%] tests/distributed/test_partition.py::test_BasicPartitionBook
PASSED [ 69%] tests/distributed/test_partition.py::test_RangePartitionBook
PASSED [ 70%] tests/distributed/test_rpc.py::test_rpc_timeout[socket]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp2-g0]
PASSED [ 51%] tests/go/test_pipeline.py::test_linkpred_data[co-buy-computer]
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp1-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp2-g1]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp1-g1]
Sleep 1 seconds to test client re-connect.
[05:38:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:38:59] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp3-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp2-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp2-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp3-g1]
Start server 0
[05:39:00] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:39:00] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
Server is waiting for connections on [172.17.0.3:21850]...
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp3-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp3-g1]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp4-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp4-g0]
PASSED [ 34%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp4-g1]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp4-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp5-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp5-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp5-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-add-shp5-g1]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp0-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp0-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp0-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp0-g1]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp1-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp1-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp1-g0]
PASSED [ 53%] tests/go/test_pipeline.py::test_linkpred_data[ogbn-arxiv]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp2-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp2-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp1-g1]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp3-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp3-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp2-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp2-g1]
Client [3259] waits on 172.17.0.3:34383
Machine (0) group (0) client (0) connect to server successfully!
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp4-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp4-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp3-g0]
[05:39:05] /root/jenkins/workspace/dgl_PR-4648/src/runtime/semaphore_wrapper.cc:83: sem_timedwait timed out after 500 milliseconds.
[05:39:05] /root/jenkins/workspace/dgl_PR-4648/src/rpc/network/socket_communicator.cc:283: Timed out when trying to receive rpc meta data after 500 milliseconds.
[05:39:05] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:84: Recv RPCMessage timeout in 500 ms.
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp5-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp3-g1]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp5-g1]
[05:39:06] /root/jenkins/workspace/dgl_PR-4648/src/runtime/semaphore_wrapper.cc:83: sem_timedwait timed out after 500 milliseconds.
[05:39:06] /root/jenkins/workspace/dgl_PR-4648/src/rpc/network/socket_communicator.cc:283: Timed out when trying to receive rpc meta data after 500 milliseconds.
[05:39:06] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:84: Recv RPCMessage timeout in 500 ms.
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp4-g0]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp0-g0]
[05:39:07] /root/jenkins/workspace/dgl_PR-4648/src/runtime/semaphore_wrapper.cc:83: sem_timedwait timed out after 500 milliseconds.
[05:39:07] /root/jenkins/workspace/dgl_PR-4648/src/rpc/network/socket_communicator.cc:283: Timed out when trying to receive rpc meta data after 500 milliseconds.
[05:39:07] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:84: Recv RPCMessage timeout in 500 ms.
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp0-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp4-g1]
PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp1-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp5-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp1-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-sub-shp5-g1]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp2-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp2-g1]
PASSED [ 54%] tests/go/test_pipeline.py::test_linkpred_data[ogbn-products]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp0-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp3-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp0-g1]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp3-g1]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp4-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp1-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp4-g1]
[05:39:10] /root/jenkins/workspace/dgl_PR-4648/src/runtime/semaphore_wrapper.cc:83: sem_timedwait timed out after 500 milliseconds.
[05:39:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/network/socket_communicator.cc:283: Timed out when trying to receive rpc meta data after 500 milliseconds.
[05:39:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:84: Recv RPCMessage timeout in 500 ms.
Client[0] in group[0] is exiting...
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp1-g1]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp5-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-div-shp5-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp2-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp0-g0]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp2-g1]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp0-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp3-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp1-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp1-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp3-g1]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp2-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp2-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp4-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp3-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp3-g1]
PASSED [ 35%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp4-g1]
PASSED [ 55%] tests/go/test_pipeline.py::test_linkpred_data[ogbl-collab]
Server (0) shutdown.
Server is exiting...
PASSED [ 72%] tests/distributed/test_rpc.py::test_rpc_timeout[tensorpipe]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp4-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp5-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp4-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-mul-shp5-g1]
[05:39:15] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created.
[05:39:15] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created.
[05:39:15] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:28918].
Sleep 1 seconds to test client re-connect.
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp5-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp5-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp0-g0]
Start server 0
[05:39:16] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created.
[05:39:16] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created.
Server is waiting for connections on [172.17.0.3:28918]...
[05:39:16] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:28918].
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp0-g1]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp0-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp0-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp1-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp1-g0]
PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp1-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp1-g1]
PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp2-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp2-g1]
PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp3-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp2-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp3-g1]
PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp4-g0]
PASSED [ 56%] tests/go/test_pipeline.py::test_linkpred_data[ogbl-citation2]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp2-g1]
PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp4-g1]
[05:39:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:41135].
Client [3279] waits on 172.17.0.3:41135
PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp5-g0]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp3-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[int64-max-copy_rhs-shp5-g1]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp0-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp0-g1]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp1-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp1-g1]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp2-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp2-g1]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp3-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp3-g1]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp4-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-u-shp4-g1]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp0-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp0-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp3-g1]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp1-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp1-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp4-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp2-g0]
PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp2-g1]
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp4-g1]
PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp3-g0]
PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp3-g1]
Machine (0) group (0) client (0) connect to server successfully!
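Note: the test_sddmm cases starting above are parameterized as [index dtype - binary op - lhs target - rhs target - shape - graph], and they exercise DGL's generalized sampled dense-dense matrix multiply. A minimal sketch assuming the public dgl.ops.gsddmm interface; the toy graph and tensors are illustrative:

    import dgl
    import torch

    g = dgl.graph(([0, 1, 2], [1, 2, 0]), idtype=torch.int32)
    u = torch.randn(3, 4)  # source-node ("u") data
    v = torch.randn(3, 4)  # destination-node ("v") data

    # Corresponds to a case like test_sddmm[int32-add-u-v-...]:
    # for every edge, add the source-node and destination-node features.
    e_out = dgl.ops.gsddmm(g, 'add', u, v, lhs_target='u', rhs_target='v')
    print(e_out.shape)  # (num_edges, 4)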
PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp5-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-v-shp4-g1] [05:39:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/./queue.h:44: Times out for popping message after 500 milliseconds. [05:39:21] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:84: Recv RPCMessage timeout in 500 ms. PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-div-shp5-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp0-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp1-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp1-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp0-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp2-g1] [05:39:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/./queue.h:44: Times out for popping message after 500 milliseconds. [05:39:22] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:84: Recv RPCMessage timeout in 500 ms. PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp3-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp4-g0] PASSED [ 41%] [05:39:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/./queue.h:44: Times out for popping message after 500 milliseconds. [05:39:23] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:84: Recv RPCMessage timeout in 500 ms.
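The repeated "Times out for popping message after 500 milliseconds" / "Recv RPCMessage timeout in 500 ms" pairs are the expected outcome of test_rpc_timeout: the receive path (queue.h:44, surfaced by rpc.cc:84) polls an internal message queue with a deadline instead of blocking forever. The same pattern in standard-library Python, as a generic analogue of the C++ queue rather than DGL's code:

    import queue

    msgq = queue.Queue()
    try:
        msg = msgq.get(timeout=0.5)  # block for at most 500 ms
    except queue.Empty:
        # Timed out: report a recv timeout to the caller instead of hanging.
        msg = None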
tests/compute/test_sparse.py::test_sddmm[int32-add-u-e-shp4-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp0-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp1-g0] PASSED [ 58%] tests/go/test_pipeline.py::test_linkpred_node_model[gcn] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp1-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp1-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp1-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp3-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp3-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-u-shp4-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp2-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp2-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp3-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-v-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp1-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp2-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp2-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp3-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-v-e-shp4-g1] [05:39:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/./queue.h:44: Times out for popping message after 500 milliseconds. [05:39:26] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:84: Recv RPCMessage timeout in 500 ms. Client[0] in group[0] is exiting... 
PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp0-g1] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp2-g0] PASSED [ 36%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp5-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp3-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp3-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_lhs-shp5-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp4-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-u-shp4-g1] PASSED [ 59%] tests/go/test_pipeline.py::test_linkpred_node_model[gat] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp0-g1] Server (0) shutdown. Server is exiting... PASSED [ 74%] tests/distributed/test_rpc.py::test_serialize PASSED [ 76%] tests/distributed/test_rpc.py::test_rpc_msg PASSED [ 78%] tests/distributed/test_rpc.py::test_rpc[tensorpipe] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp3-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp4-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp0-g1] [05:39:30] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:30] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:30] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:16300]. Sleep 1 seconds to test client re-connect. 
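"Failed to connect to receiver[...]. Sleep 1 seconds to test client re-connect." is deliberate: the test starts the client before the server is listening, so the first connect fails and the retry path gets exercised; the server's "waiting for connections" line then appears and the client succeeds. The retry loop in miniature, using plain sockets rather than DGL's tensorpipe/socket transports (a sketch):

    import socket
    import time

    def connect_with_retry(host, port, retries=30, delay=1.0):
        for _ in range(retries):
            try:
                return socket.create_connection((host, port))
            except OSError:
                time.sleep(delay)  # server not listening yet; wait and retry
        raise ConnectionError(f"could not reach {host}:{port}")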
PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-v-shp4-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp3-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp4-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[int32-add-e-e-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp1-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp2-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-u-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp0-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp1-g0] Start server 0 PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp1-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp1-g1] [05:39:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. Server is waiting for connections on [172.17.0.3:16300]... [05:39:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:16300]. 
PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp3-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp2-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-v-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp1-g1] [05:39:33] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:48855]. Client [3301] waits on 172.17.0.3:48855 PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp3-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp2-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp3-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp3-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp3-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp4-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-u-e-shp4-g1] PASSED [ 60%] tests/go/test_pipeline.py::test_linkpred_node_model[sage] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp0-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp4-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp1-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp4-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp5-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp3-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp3-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-min-copy_rhs-shp5-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp4-g0] Machine (0) group (0) client (0) connect to server successfuly! PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-u-shp4-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp0-g0] Client[0] in group[0] is exiting... 
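test_linkpred_node_model parametrizes the dgl-go pipeline over node encoders (gcn, gat, sage, sgc, gin). The kind of two-layer GCN encoder such a recipe wires up, sketched with dgl.nn rather than the pipeline's generated code:

    import torch.nn as nn
    import torch.nn.functional as F
    from dgl.nn import GraphConv

    class GCNEncoder(nn.Module):
        def __init__(self, in_feats, hidden_feats):
            super().__init__()
            self.conv1 = GraphConv(in_feats, hidden_feats)
            self.conv2 = GraphConv(hidden_feats, hidden_feats)

        def forward(self, g, x):
            h = F.relu(self.conv1(g, x))
            return self.conv2(g, h)  # node embeddings for the link scorer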
PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp0-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp1-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp2-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp3-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp3-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-v-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp0-g0] Server (0) shutdown. Server is exiting... PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp0-g1] PASSED [ 46%] PASSED [ 80%] tests/distributed/test_rpc.py::test_multi_client[socket] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp1-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp1-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp2-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp0-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp3-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-v-e-shp4-g1] [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Sleep 1 seconds to test client re-connect. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp0-g0] [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created.
[05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:38] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created.
PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp1-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp0-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp1-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp1-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp1-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp2-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp3-g0] PASSED [ 61%] tests/go/test_pipeline.py::test_linkpred_node_model[sgc] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp2-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp4-g0] Start server 0 [05:39:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:39:39] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:12279]... PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-u-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp0-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp0-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp1-g0] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp2-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp2-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp3-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp3-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp3-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp4-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-v-shp4-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp3-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp2-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp2-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp3-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp3-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp4-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-sub-e-e-shp4-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp0-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp0-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp1-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp1-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp2-g0] PASSED [ 47%] 
tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp2-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp3-g0] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp3-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-u-shp4-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp0-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp0-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp1-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp1-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp4-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp2-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp2-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp5-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp3-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp3-g1] PASSED [ 37%] tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp5-g1] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp4-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp0-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp0-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp0-g1] PASSED [ 62%] tests/go/test_pipeline.py::test_linkpred_node_model[gin] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp1-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp1-g1] Client [3324] waits on 172.17.0.3:60811 Client [3322] waits on 172.17.0.3:60539 Client [3350] waits on 172.17.0.3:60133 Client [3339] waits on 172.17.0.3:59875 Client [3321] waits on 172.17.0.3:42499 Client [3340] waits on 172.17.0.3:43691 Client [3331] waits on 172.17.0.3:41001 Client [3320] waits on 172.17.0.3:38685 Client [3327] waits on 172.17.0.3:34399 Client [3345] waits on 172.17.0.3:33877 Client [3333] waits on 172.17.0.3:44893 Client [3326] waits on 172.17.0.3:45687 Client [3319] waits on 172.17.0.3:48661 Client [3325] waits on 172.17.0.3:47561 Client [3329] waits on 172.17.0.3:53529 Client [3349] waits on 172.17.0.3:48929 Client [3328] waits on 172.17.0.3:53689 Client [3346] waits on 172.17.0.3:56611 Client [3323] waits on 172.17.0.3:57303 Client [3330] waits on 172.17.0.3:58351 Machine (0) group (0) client (3) connect to server successfuly! Machine (0) group (0) client (5) connect to server successfuly! Machine (0) group (0) client (6) connect to server successfuly! Machine (0) group (0) client (7) connect to server successfuly! Machine (0) group (0) client (2) connect to server successfuly! Machine (0) group (0) client (11) connect to server successfuly! Machine (0) group (0) client (4) connect to server successfuly! Machine (0) group (0) client (1) connect to server successfuly! Machine (0) group (0) client (15) connect to server successfuly! Machine (0) group (0) client (14) connect to server successfuly! Machine (0) group (0) client (17) connect to server successfuly!
Machine (0) group (0) client (12) connect to server successfuly! Machine (0) group (0) client (8) connect to server successfuly! Machine (0) group (0) client (16) connect to server successfuly! Machine (0) group (0) client (9) connect to server successfuly! Machine (0) group (0) client (0) connect to server successfuly! Machine (0) group (0) client (19) connect to server successfuly! Machine (0) group (0) client (13) connect to server successfuly! Machine (0) group (0) client (18) connect to server successfuly! Machine (0) group (0) client (10) connect to server successfuly! PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp2-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp2-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp0-g1] Client[11] in group[0] is exiting... Client[4] in group[0] is exiting... PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp3-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp3-g1] Client[16] in group[0] is exiting... Client[8] in group[0] is exiting... Client[12] in group[0] is exiting... Client[18] in group[0] is exiting... Client[14] in group[0] is exiting... Client[3] in group[0] is exiting... Client[1] in group[0] is exiting... Client[13] in group[0] is exiting... Client[19] in group[0] is exiting... Client[9] in group[0] is exiting... Client[10] in group[0] is exiting... Client[17] in group[0] is exiting... Client[7] in group[0] is exiting... Client[6] in group[0] is exiting... Client[5] in group[0] is exiting... Client[0] in group[0] is exiting... Client[2] in group[0] is exiting... Client[15] in group[0] is exiting... PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp4-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-u-e-shp4-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp1-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp0-g0] PASSED [ 48%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp0-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp1-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp1-g1] Server (0) shutdown. Server is exiting... PASSED [ 81%] tests/distributed/test_rpc.py::test_multi_client[tensorpipe] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp2-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp2-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp2-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp3-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp3-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp2-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp4-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-u-shp4-g1] [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created.
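test_multi_client is what produces these bursts: one server, twenty client processes that each connect ("Client [pid] waits on ...", then "connect to server successfuly!"), do their RPC round trips, and exit before the server shuts down. The orchestration in miniature, using generic multiprocessing; run_server and run_client are hypothetical stand-ins for the test's helpers:

    import multiprocessing as mp

    def launch(run_server, run_client, num_clients=20):
        server = mp.Process(target=run_server)
        clients = [mp.Process(target=run_client, args=(i,))
                   for i in range(num_clients)]
        server.start()
        for c in clients:
            c.start()
        for c in clients:
            c.join()   # all clients exit first ...
        server.join()  # ... then the server shuts down

Because the twenty clients share one console, their messages arrive in arbitrary order, which is why the client numbers above are shuffled.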
[05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. Sleep 1 seconds to test client re-connect. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created.
[05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501]. [05:39:48] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:11501].
PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp0-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp0-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp1-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp2-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp2-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp3-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp3-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp4-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-v-shp4-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp0-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp0-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp3-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp1-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp1-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp3-g1] Start server 0 [05:39:49] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:39:49] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. Server is waiting for connections on [172.17.0.3:11501]... [05:39:49] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:11501]. PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp2-g0] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp2-g1] PASSED [ 49%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp3-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp4-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp4-g0] PASSED [ 64%] tests/go/test_pipeline.py::test_linkpred_edge_model[ele] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-v-e-shp4-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp0-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp0-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp1-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp1-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp4-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp2-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp2-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp3-g1] [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:33563]. Client [3468] waits on 172.17.0.3:33563 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:48785]. Client [3469] waits on 172.17.0.3:48785 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:55543]. 
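"Server is waiting for connections on [172.17.0.3:...]" marks the server's accept loop; with tensorpipe, "TPReceiver starts to wait on [tcp://...]" is the receiving side of the same handshake. The shape of that loop in plain Python sockets (the real transport is the C++ tensorpipe/socket code; handle_connection is a hypothetical handler):

    import socket

    def serve(host, port, handle_connection):
        srv = socket.create_server((host, port))  # bind + listen
        print(f"Server is waiting for connections on [{host}:{port}]...")
        while True:
            conn, peer = srv.accept()  # one incoming client at a time
            handle_connection(conn, peer)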
[05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:51795]. [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:51813]. Client [3476] waits on 172.17.0.3:55543 Client [3483] waits on 172.17.0.3:51795 Client [3471] waits on 172.17.0.3:51813 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:36643]. Client [3473] waits on 172.17.0.3:36643 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:35533]. [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:55301]. Client [3475] waits on 172.17.0.3:35533 Client [3477] waits on 172.17.0.3:55301 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:47747]. [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:53047]. Client [3496] waits on 172.17.0.3:47747 Client [3472] waits on 172.17.0.3:53047 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:41577]. Client [3478] waits on 172.17.0.3:41577 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:49799]. Client [3474] waits on 172.17.0.3:49799 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:34857]. Client [3470] waits on 172.17.0.3:34857 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:41643]. [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:58793]. Client [3494] waits on 172.17.0.3:41643 Client [3499] waits on 172.17.0.3:58793 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:50777]. Client [3502] waits on 172.17.0.3:50777 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:55745]. Client [3497] waits on 172.17.0.3:55745 [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:50827]. Client [3479] waits on 172.17.0.3:50827 PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp5-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp4-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-u-shp4-g1] [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:53589]. [05:39:51] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:59807].
Client [3481] waits on 172.17.0.3:53589 Client [3489] waits on 172.17.0.3:59807 PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp0-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp0-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp5-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp1-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp1-g1] PASSED [ 50%] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp0-g0] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp2-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp2-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp3-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp3-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp0-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp4-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-v-shp4-g1] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp0-g0] PASSED [ 50%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp1-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp2-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp2-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp3-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp3-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp4-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-mul-e-e-shp4-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp0-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp1-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp2-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp2-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp3-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp3-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp4-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-u-shp4-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp0-g0] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp0-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp1-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp1-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp1-g1] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp2-g0] PASSED [ 51%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp2-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp2-g0] Machine (0) group (0) client (1) connect to server successfuly! Machine (0) group (0) client (0) connect to server successfuly! Machine (0) group (0) client (8) connect to server successfuly!
Machine (0) group (0) client (7) connect to server successfuly! Machine (0) group (0) client (4) connect to server successfuly! Machine (0) group (0) client (2) connect to server successfuly! Machine (0) group (0) client (5) connect to server successfuly! Machine (0) group (0) client (3) connect to server successfuly! Machine (0) group (0) client (10) connect to server successfuly! Machine (0) group (0) client (11) connect to server successfuly! Machine (0) group (0) client (14) connect to server successfuly! Machine (0) group (0) client (9) connect to server successfuly! Machine (0) group (0) client (6) connect to server successfuly! Machine (0) group (0) client (15) connect to server successfuly! Machine (0) group (0) client (19) connect to server successfuly! Machine (0) group (0) client (13) connect to server successfuly! Machine (0) group (0) client (17) connect to server successfuly! Machine (0) group (0) client (16) connect to server successfuly! Machine (0) group (0) client (12) connect to server successfuly! Machine (0) group (0) client (18) connect to server successfuly! PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp3-g1] PASSED [ 65%] tests/go/test_pipeline.py::test_linkpred_edge_model[bilinear] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp2-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp4-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-v-shp4-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp0-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp0-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp1-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp1-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp3-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp2-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp2-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp3-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp4-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp4-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-u-e-shp4-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp0-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp0-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp4-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp1-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp1-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp5-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp2-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp2-g1] PASSED [ 38%] tests/compute/test_sparse.py::test_spmm[idtype1-max-mul-shp5-g1] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp3-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp3-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp0-g0] Client[8] in group[0] is exiting...
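test_linkpred_edge_model selects how an edge score is computed from its two endpoint embeddings: "ele" for an element-wise combination, "bilinear" for a learned bilinear form. A dot-product scorer in the same spirit, using DGL's apply_edges (a sketch, not the pipeline's code):

    import dgl.function as fn

    def score_edges(g, h):
        # Score every edge of g from node embeddings h.
        with g.local_scope():
            g.ndata["h"] = h
            g.apply_edges(fn.u_dot_v("h", "h", "score"))
            return g.edata["score"]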
Client[7] in group[0] is exiting... PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp4-g0] PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-u-shp4-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp0-g1] Client[1] in group[0] is exiting... Client[0] in group[0] is exiting... Client[4] in group[0] is exiting... PASSED [ 52%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp0-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp1-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp2-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp2-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp3-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp3-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp4-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-v-shp4-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp0-g0] PASSED [ 66%] tests/go/test_pipeline.py::test_linkpred_neg_sampler[global] Client[9] in group[0] is exiting... Client[10] in group[0] is exiting... Client[3] in group[0] is exiting... Client[14] in group[0] is exiting... Client[19] in group[0] is exiting... Client[13] in group[0] is exiting... PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp0-g1] Client[5] in group[0] is exiting... Client[17] in group[0] is exiting... Client[15] in group[0] is exiting... Client[2] in group[0] is exiting... Client[11] in group[0] is exiting... PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp1-g1] Client[6] in group[0] is exiting... Client[12] in group[0] is exiting... PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp2-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp2-g1] Client[16] in group[0] is exiting... Client[18] in group[0] is exiting... Server (0) shutdown. Server is exiting... PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp3-g0] PASSED [ 83%] tests/distributed/test_rpc.py::test_multi_thread_rpc[socket] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp1-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp3-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp4-g0] [05:40:02] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:40:02] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. 
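The [global] and [persource] cases of test_linkpred_neg_sampler pick the negative-edge sampling strategy for link prediction: sample corrupted edges uniformly over the whole graph, or per source node. Per-source uniform corruption with DGL's built-in sampler looks roughly like this (a sketch):

    import torch
    import dgl
    from dgl.dataloading.negative_sampler import Uniform

    g = dgl.graph(([0, 1, 2], [1, 2, 0]))
    neg_sampler = Uniform(k=2)  # 2 corrupted destinations per positive edge
    # Corrupt edges 0 and 1: sources are kept, destinations are resampled.
    neg_src, neg_dst = neg_sampler(g, torch.tensor([0, 1]))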
PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-v-e-shp4-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp0-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp0-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp2-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp1-g0] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp1-g1] PASSED [ 53%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp2-g1] Sleep 1 seconds to test client re-connect. Sleep 1 seconds to test client re-connect. PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp3-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp3-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp4-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp3-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-u-shp4-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp0-g0] Start server 1 [05:40:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:40:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:17469]... Start server 0 [05:40:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:40:04] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:17467]... 
PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp0-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp3-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp1-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp1-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp2-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp4-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp3-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp3-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp4-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp4-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-v-shp4-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp0-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp0-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp1-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp1-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp2-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp2-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp3-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp3-g1] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp4-g0] PASSED [ 54%] tests/compute/test_sparse.py::test_sddmm[int32-div-e-e-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp0-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp1-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp1-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp2-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp2-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp3-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp3-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-u-shp4-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp0-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp5-g0] PASSED [ 67%] tests/go/test_pipeline.py::test_linkpred_neg_sampler[persource] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp1-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp1-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp2-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-div-shp5-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp2-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp3-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp0-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp3-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp0-g1] PASSED [ 55%] 
tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp4-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-v-shp4-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp1-g0] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp0-g0] Client [1090] waits on 172.17.0.3:44973 Machine (0) group (0) client (0) connect to server successfully! Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp0-g1] PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp1-g0] PASSED [ 85%] tests/distributed/test_rpc.py::test_multi_thread_rpc[tensorpipe] [05:40:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:40:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:40:08] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:19482]. PASSED [ 55%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp2-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp3-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp3-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp4-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-u-e-shp4-g1] Sleep 1 seconds to test client re-connect. Sleep 1 seconds to test client re-connect. PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp0-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp0-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp1-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp3-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp1-g1] Start server 0 [05:40:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:40:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. Server is waiting for connections on [172.17.0.3:19482]... [05:40:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:19482]. Start server 1 PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp2-g0] [05:40:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:40:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. Server is waiting for connections on [172.17.0.3:19484]... [05:40:10] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:19484].
PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp2-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp3-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp3-g0] PASSED [ 69%] tests/go/test_pipeline.py::test_linkpred_neg_sampler[] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp3-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp4-g0] [05:40:11] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:59207]. Client [1090] waits on 172.17.0.3:59207 PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp4-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-u-shp4-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp4-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp0-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp0-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp1-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp1-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp2-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp2-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp3-g0] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp3-g1] PASSED [ 56%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-v-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp0-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp0-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp1-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp1-g1] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp5-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp2-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp2-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp3-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp3-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp4-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-v-e-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp0-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_lhs-shp5-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp0-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp1-g0] PASSED [ 39%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp0-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp1-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp2-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp2-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp0-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp3-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp3-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp4-g0] Machine (0) group (0) client (0) connect to server 
successfully! Client[0] in group[0] is exiting... Server (0) shutdown. Server is exiting... Server (1) shutdown. Server is exiting... PASSED [ 87%] tests/distributed/test_rpc.py::test_multi_client_groups SKIPPED (Tes...) [ 89%] tests/distributed/test_rpc.py::test_multi_client_connect[socket] [05:40:14] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:40:14] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp1-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-u-shp4-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp0-g0] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp1-g1] PASSED [ 57%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp2-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp3-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp3-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp4-g0] PASSED [ 70%] tests/go/test_pipeline.py::test_graphpred[gin-csv] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-v-shp4-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp0-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp2-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp3-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp3-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp4-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-dot-e-e-shp4-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp0-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp0-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp1-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp1-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp2-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp2-g1] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp3-g0] PASSED [ 58%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-u-shp4-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp0-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp0-g1] PASSED [ 59%]
tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp1-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp1-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp2-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp3-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp3-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-v-shp4-g1] Expected error: Failed to build connection with peer after 1 retries. Please check network availability or increase max try times via 'DGL_DIST_MAX_TRY_TIMES'. PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp0-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp0-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp1-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp1-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp2-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp2-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp3-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp3-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp4-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-u-e-shp4-g1] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp0-g0] PASSED [ 59%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp1-g1] [05:40:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:40:18] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Sleep 1 seconds to test client re-connect.
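The "Expected error" above is the negative half of test_multi_client_connect: the client is deliberately started before any server, so the first connect attempt must fail, and the error text itself names the retry knob. A hedged sketch of how that knob would be raised in practice follows; only the DGL_DIST_MAX_TRY_TIMES variable name comes from the log, while the value and the initialize() call are illustrative.

    # Sketch: give distributed clients a larger connect-retry budget.
    # Only the env var name is taken from the error message; the value
    # is arbitrary, and it must be set in each client process before
    # DGL's RPC layer starts.
    import os

    os.environ['DGL_DIST_MAX_TRY_TIMES'] = '10'

    import dgl
    dgl.distributed.initialize('ip_config.txt')  # illustrative entry point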
PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp4-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp2-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp2-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp3-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp3-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp4-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-u-shp4-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp5-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp0-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp2-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp2-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp3-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp3-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp4-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-v-shp4-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp0-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp0-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp1-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp1-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp2-g0] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp2-g1] PASSED [ 60%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp4-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-v-e-shp4-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp0-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_spmm[idtype1-max-copy_rhs-shp5-g1] Start server 0 [05:40:19] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~socket is created. [05:40:19] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~socket is created. Server is waiting for connections on [172.17.0.3:12614]... 
PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp1-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp1-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp2-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp0-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp0-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp1-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp1-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp2-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp2-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp3-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp3-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp4-g0] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-u-shp4-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp0-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp2-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp3-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp0-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp4-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-u-shp4-g1] PASSED [ 40%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp1-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp1-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp0-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp2-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp1-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp1-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp2-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp3-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp2-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-v-shp4-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp3-g1] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp0-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-v-shp4-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp1-g0] PASSED [ 61%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp0-g1] PASSED [ 62%] 
tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp2-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp2-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp3-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp3-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_lhs-e-e-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp0-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp2-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp2-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp3-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp3-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp4-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-u-shp4-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp0-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp1-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp1-g0] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp1-g1] PASSED [ 71%] tests/go/test_pipeline.py::test_graphpred[gin-ogbg-molhiv] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp2-g1] PASSED [ 62%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp2-g0] PASSED [ 62%]PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp3-g0] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp3-g1] PASSED [ 41%]PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp3-g1] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp4-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-u-e-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp4-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-v-shp4-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp0-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp0-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp0-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp1-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp1-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp1-g1] PASSED [ 41%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp2-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp3-g0] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp3-g0] Client [3644] waits on 172.17.0.3:46127 Machine (0) group (0) client (0) connect to server successfully! PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp3-g1] PASSED [ 41%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp3-g1] Client[0] in group[0] is exiting... PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp4-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-u-e-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-u-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp0-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp2-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp2-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp3-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp4-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-v-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp0-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp1-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp1-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp0-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp2-g0] Server (0) shutdown. Server is exiting... PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp3-g0] PASSED [ 90%] tests/distributed/test_rpc.py::test_multi_client_connect[tensorpipe] [05:40:27] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:40:27] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:40:27] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:17628]. Expected error: Failed to build connection with peer[172.17.0.3:17628] after 1 retries. Please check network availability or increase max try times via 'DGL_DIST_MAX_TRY_TIMES'.
PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp2-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp3-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp4-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-u-shp4-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp3-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp2-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp3-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-v-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp0-g1] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp1-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-v-e-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp2-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp0-g0] PASSED [ 42%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp0-g1] [05:40:28] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:40:28] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. [05:40:28] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:37: Failed to connect to receiver[tcp://172.17.0.3:17628]. Sleep 1 seconds to test client re-connect. 
PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp3-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp2-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-v-e-shp4-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp3-g1] Start server 0 [05:40:29] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:140: Sender with NetType~tensorpipe is created. [05:40:29] /root/jenkins/workspace/dgl_PR-4648/src/rpc/rpc.cc:159: Receiver with NetType~tensorpipe is created. Server is waiting for connections on [172.17.0.3:17628]... [05:40:29] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:17628]. 
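The choreography in the tensorpipe messages above is deliberate: the client comes up first, its initial connect fails ("Failed to connect to receiver..."), it sleeps ("Sleep 1 seconds to test client re-connect."), and only then does "Start server 0" bring the receiver up. A stdlib-only sketch of the same retry-until-listening pattern; this illustrates the test's timing, not DGL's tensorpipe implementation.

    # Stdlib-only sketch of the connect-retry choreography in the log above:
    # the client polls until the server socket starts listening.
    import socket
    import time

    def connect_with_retry(addr, port, retries=5, delay=1.0):
        for attempt in range(retries):
            try:
                return socket.create_connection((addr, port), timeout=1.0)
            except OSError:
                print(f'Failed to connect (attempt {attempt + 1}); '
                      f'sleeping {delay}s before re-connect.')
                time.sleep(delay)
        raise ConnectionError(f'could not reach {addr}:{port} after {retries} retries')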
PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp4-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-u-shp4-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp0-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp0-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp3-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp4-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-v-shp4-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp0-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp0-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp3-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int32-copy_rhs-e-e-shp4-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp0-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp1-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp1-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp2-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp3-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-u-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-u-shp4-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp0-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp1-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp1-g0] PASSED [ 43%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp1-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp3-g0] [05:40:31] /root/jenkins/workspace/dgl_PR-4648/src/rpc/tensorpipe/tp_communicator.cc:108: TPReceiver starts to wait on [tcp://172.17.0.3:48147]. Client [3662] waits on 172.17.0.3:48147 PASSED [ 72%] tests/go/test_pipeline.py::test_graphpred[gin-ogbg-molpcba] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp3-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp3-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp4-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-v-shp4-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp0-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp0-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp1-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp1-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp2-g0] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp2-g1] PASSED [ 43%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-add-e-e-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp0-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp1-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp2-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp4-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-u-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-v-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp1-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp1-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp0-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp1-g0] PASSED [ 44%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp2-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp2-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp3-g0] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp2-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp4-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp3-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-v-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp0-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp4-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-u-e-shp4-g1] PASSED [ 44%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp0-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp0-g1] Machine (0) group (0) client (0) connect to server successfully! PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp1-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp1-g1] Client[0] in group[0] is exiting... PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp3-g0] Server (0) shutdown. Server is exiting... PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp2-g1] PASSED [ 92%] tests/distributed/test_shared_mem_store.py::test_init SKIPPED (skip ...) [ 94%] tests/distributed/test_shared_mem_store.py::test_compute SKIPPED (sk...)
[ 96%]
tests/distributed/test_shared_mem_store.py::test_sync_barrier SKIPPED [ 98%]
tests/distributed/test_shared_mem_store.py::test_copy_shared_mem SKIPPED [100%]

=================================== FAILURES ===================================
________________________________ test_partition ________________________________

    @unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
    def test_partition():
        g = create_random_graph(1000)
>       check_partition(g, 'metis', False)

tests/distributed/test_partition.py:407:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/distributed/test_partition.py:247: in check_partition
    g.update_all(fn.copy_src('feats', 'msg'), fn.sum('msg', 'h'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx =
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.8143, -1.1574, 0.6893, ..., -0.5411, 0.6006, 1.0566],
        [-0.3864, -0.4807, -0.8135, ..., 1.400...0168, 0.9815, ..., 0.7116, 0.4885, -0.7186],
        [-0.9336, -1.3287, 0.1880, ..., -0.3666, -0.7204, 1.1899]])
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError

=============================== warnings summary ===============================
python/dgl/backend/backend.py:1741
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/backend.py:1741: DeprecationWarning: invalid escape sequence \P
    """

python/dgl/backend/pytorch/tensor.py:16
python/dgl/backend/pytorch/tensor.py:16
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:16: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(th.__version__) < LooseVersion("1.9.0"):

python/dgl/backend/pytorch/tensor.py:340
python/dgl/backend/pytorch/tensor.py:340
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:340: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(th.__version__) >= LooseVersion("1.10.0"):

python/dgl/dataloading/dataloader.py:33
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/dataloading/dataloader.py:33: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_VER = LooseVersion(torch.__version__)

python/dgl/_dataloading/pytorch/dataloader.py:23
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:23: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_VER = LooseVersion(th.__version__)

python/dgl/_dataloading/pytorch/dataloader.py:24
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:24: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_16 = PYTORCH_VER >= LooseVersion("1.6.0")

python/dgl/_dataloading/pytorch/dataloader.py:25
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/_dataloading/pytorch/dataloader.py:25: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_17 = PYTORCH_VER >= LooseVersion("1.7.0")

python/dgl/heterograph.py:72
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`.
    dgl_warning('Recommend creating graphs by `dgl.graph(data)`'

tests/distributed/test_dist_graph_store.py: 5 warnings
tests/distributed/test_distributed_sampling.py: 9 warnings
tests/distributed/test_mp_dataloader.py: 1 warning
tests/distributed/test_partition.py: 2 warnings
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/graph_partition_book.py:733: DGLWarning: Etype with 'str' format is deprecated. Please use '(str, str, str)'.
    "Etype with 'str' format is deprecated. Please use '(str, str, str)'.")

tests/distributed/test_dist_graph_store.py::test_split[True]
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/_tensor.py:575: UserWarning: floor_divide is deprecated, and will be removed in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). (Triggered internally at /pytorch/aten/src/ATen/native/BinaryOps.cpp:467.)
    return torch.floor_divide(self, other)

tests/distributed/test_distributed_sampling.py::test_rpc_find_edges_shuffle[1]
tests/distributed/test_distributed_sampling.py::test_rpc_find_edges_shuffle[2]
tests/distributed/test_distributed_sampling.py::test_rpc_get_degree_shuffle[1]
tests/distributed/test_distributed_sampling.py::test_rpc_get_degree_shuffle[2]
tests/distributed/test_distributed_sampling.py::test_rpc_sampling_shuffle[1]
tests/distributed/test_distributed_sampling.py::test_rpc_sampling_shuffle[2]
tests/distributed/test_distributed_sampling.py::test_rpc_in_subgraph
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/heterograph.py:6168: DGLWarning: DGLGraph.readonly is deprecated in v0.5. DGLGraph now always supports mutable operations like add_nodes and add_edges.
    dgl_warning('DGLGraph.readonly is deprecated in v0.5.\n'

tests/distributed/test_distributed_sampling.py::test_standalone_sampling
tests/distributed/test_distributed_sampling.py::test_standalone_etype_sampling
tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-True-0-3]
tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-True-4-3]
tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-False-0-3]
tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-False-4-3]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/distributed/partition.py:578: DGLWarning: The argument reshuffle will be deprecated in the next release. For heterogeneous graphs, reshuffle must be enabled.
    dgl_warning("The argument reshuffle will be deprecated in the next release. "

tests/distributed/test_distributed_sampling.py::test_standalone_sampling
tests/distributed/test_distributed_sampling.py::test_standalone_etype_sampling
tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-True-0-3]
tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-True-4-3]
tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-False-0-3]
tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-False-4-3]
  /root/jenkins/workspace/dgl_PR-4648/python/dgl/backend/pytorch/tensor.py:277: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
    mask = th.tensor(mask, dtype=th.bool)

-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
- generated xml file: /root/jenkins/workspace/dgl_PR-4648/pytest_distributed.xml -
============================ slowest 100 durations =============================
90.56s call tests/distributed/test_dist_graph_store.py::test_server_client
63.46s call tests/distributed/test_distributed_sampling.py::test_rpc_sampling_shuffle[2]
45.08s call tests/distributed/test_distributed_sampling.py::test_rpc_sampling_shuffle[1]
42.43s call tests/distributed/test_mp_dataloader.py::test_dataloader[edge-4-3]
42.26s call tests/distributed/test_mp_dataloader.py::test_dataloader[node-4-3]
38.21s call tests/distributed/test_mp_dataloader.py::test_neg_dataloader[4-3]
33.95s call tests/distributed/test_mp_dataloader.py::test_dataloader[edge-0-3]
33.13s call tests/distributed/test_mp_dataloader.py::test_dataloader[node-0-3]
31.83s call tests/distributed/test_mp_dataloader.py::test_neg_dataloader[0-3]
19.06s call tests/distributed/test_dist_graph_store.py::test_dist_emb_server_client
16.29s call tests/distributed/test_rpc.py::test_rpc_timeout[socket]
15.18s call tests/distributed/test_rpc.py::test_rpc_timeout[tensorpipe]
14.69s call tests/distributed/test_rpc.py::test_multi_client[tensorpipe]
14.64s call tests/distributed/test_new_kvstore.py::test_kv_multi_role
13.54s call tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-False-4-3]
13.33s call tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-True-False-4-3]
12.52s call tests/distributed/test_new_kvstore.py::test_kv_store
12.46s call tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-True-4-3]
12.21s call tests/distributed/test_rpc.py::test_multi_client_connect[socket]
12.08s call tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-True-True-4-3]
11.29s call tests/distributed/test_dist_graph_store.py::test_split[True]
10.54s call tests/distributed/test_distributed_sampling.py::test_rpc_find_edges_shuffle[2]
9.79s call tests/distributed/test_rpc.py::test_multi_client[socket]
9.33s call tests/distributed/test_distributed_sampling.py::test_rpc_in_subgraph
9.28s call tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-False-0-3]
9.14s call tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-True-False-0-3]
8.94s call tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-True-True-0-3]
8.65s call tests/distributed/test_distributed_sampling.py::test_rpc_find_edges_shuffle[1]
8.41s call tests/distributed/test_rpc.py::test_multi_client_connect[tensorpipe]
8.20s call tests/distributed/test_mp_dataloader.py::test_dist_dataloader[1-False-True-0-3]
8.02s call tests/distributed/test_rpc.py::test_rpc[tensorpipe]
6.38s call tests/distributed/test_rpc.py::test_multi_thread_rpc[socket]
6.35s call tests/distributed/test_rpc.py::test_multi_thread_rpc[tensorpipe]
5.59s call tests/distributed/test_distributed_sampling.py::test_rpc_get_degree_shuffle[2]
5.02s call tests/distributed/test_dist_graph_store.py::test_split_even
4.47s call tests/distributed/test_distributed_sampling.py::test_rpc_get_degree_shuffle[1]
4.16s call tests/distributed/test_dist_graph_store.py::test_split[False]
3.46s call tests/distributed/test_dist_graph_store.py::test_standalone
3.43s call tests/distributed/test_dist_graph_store.py::test_standalone_node_emb
0.42s call tests/distributed/test_partition.py::test_hetero_partition
0.32s call tests/distributed/test_mp_dataloader.py::test_standalone
0.32s call tests/distributed/test_distributed_sampling.py::test_standalone_etype_sampling
0.24s call tests/distributed/test_distributed_sampling.py::test_standalone_sampling
0.03s call tests/distributed/test_partition.py::test_partition
0.01s setup tests/distributed/test_mp_dataloader.py::test_dataloader[node-0-3]
(55 durations < 0.005s hidden. Use -vv to show these durations.)
=========================== short test summary info ============================
FAILED tests/distributed/test_partition.py::test_partition - TypeError: empty...
======= 1 failed, 48 passed, 6 skipped, 47 warnings in 720.74s (0:12:00) =======
PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp4-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-u-e-shp4-g1] FAIL: distributed [Pipeline] } PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp0-g0] [Pipeline] // timeout [Pipeline] } [Pipeline] // stage Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration...
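The lone failure above is the subject of this PR (commit message: "fix for pytorch < 1.12"): where the real autocast context is unavailable, the PyTorch backend substitutes a no-op empty_context, and that stand-in does not accept autocast's enabled= keyword, so `with autocast(enabled=False):` inside gspmm raises the TypeError shown. Below is one way to make the fallback keyword-tolerant, sketched from the traceback; the shim's shape is an assumption, not necessarily the exact change this PR lands.

    # Sketch of a keyword-tolerant fallback; `empty_context` is the name
    # from the traceback, but the actual patch in the PR may differ.
    from contextlib import contextmanager

    try:
        from torch.cuda.amp import autocast  # real context, accepts enabled=
    except ImportError:
        @contextmanager
        def autocast(enabled=False, **kwargs):
            # No-op stand-in, so `with autocast(enabled=False):` also
            # works on PyTorch builds without amp support.
            yield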
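Most of the 47 warnings are the same DeprecationWarning about distutils' LooseVersion, and the message names its own replacement. A sketch of the suggested migration for the version guards quoted in the warnings summary; stripping the '+' local tag (e.g. 1.10.0+cu113) is an illustrative detail, not something the warning prescribes.

    # Migrating the LooseVersion guards flagged in the warnings summary
    # to packaging.version, as the DeprecationWarning suggests.
    from packaging.version import Version, parse
    import torch as th

    PYTORCH_VER = parse(th.__version__.split('+')[0])  # '1.10.0+cu113' -> 1.10.0
    PYTORCH_16 = PYTORCH_VER >= Version('1.6.0')
    PYTORCH_17 = PYTORCH_VER >= Version('1.7.0')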
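Likewise, the floor_divide UserWarning spells out its own fix: the torch.div replacement makes the rounding direction explicit, which only matters for negative operands.

    # torch.floor_divide currently truncates toward zero; torch.div makes
    # the intended rounding explicit.
    import torch as th

    a, b = th.tensor([-7, 7]), th.tensor([2, 2])
    th.div(a, b, rounding_mode='trunc')  # tensor([-3, 3]) - current behavior
    th.div(a, b, rounding_mode='floor')  # tensor([-4, 3]) - true floor division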
PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp3-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp1-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp4-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-u-shp4-g1] [WS-CLEANUP] done [Pipeline] } $ docker stop --time=1 9d11908e8a1623fe2cb3ee8ecb53c2f89a04f190533b91026267498f2decd76b PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp2-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp3-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp0-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-v-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp1-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-u-shp4-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp0-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp0-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp1-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp1-g1] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp2-g0] PASSED [ 45%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp3-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-v-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp2-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp0-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp1-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp3-g0] PASSED [ 68%] 
tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp3-g1] $ docker rm -f 9d11908e8a1623fe2cb3ee8ecb53c2f89a04f190533b91026267498f2decd76b PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp1-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp2-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-v-e-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp2-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp0-g0] [Pipeline] // withDockerContainer [Pipeline] } [Pipeline] // withEnv [Pipeline] } PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp3-g0] [Pipeline] // node [Pipeline] } PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp0-g1] [Pipeline] // stage [Pipeline] } Failed in branch Distributed PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp1-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp1-g1] PASSED [ 74%] tests/go/test_pipeline.py::test_graphpred[pna-csv] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-v-e-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp2-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp0-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp0-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp3-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp1-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp1-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-u-shp4-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp2-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp0-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp2-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp1-g1] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp3-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp2-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp4-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-u-shp4-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp2-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp3-g0] PASSED [ 46%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp0-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp3-g1] PASSED [ 47%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-v-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-v-shp4-g0] PASSED [ 
PASSED [ 69%-70%] tests/compute/test_sparse.py::test_sddmm[int64-add-e-...] and test_sddmm[int64-sub-u-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 47%-48%] tests/compute/test_sparse.py::test_sddmm[idtype0-sub-e-...] and test_sddmm[idtype0-mul-u-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
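The bracketed suffixes above are pytest parametrize IDs: each one is a point in the cross product of graph index dtype, binary operator, lhs target, rhs target, feature shape, and graph fixture. Two test shards run in parallel here, one reporting idtype0 cases at roughly 45%-63% and one reporting int64 cases at roughly 67%-86%, which is why the progress percentages alternate between the two ranges. A hypothetical sketch that generates IDs of the same shape (not DGL's actual test code):

```python
import pytest

# Stacked parametrize decorators: pytest takes the cross product and joins
# the values bottom-up into IDs like test_sddmm[int64-add-u-v-shp0-g0].
@pytest.mark.parametrize("g", ["g0", "g1"])                                # graph fixtures
@pytest.mark.parametrize("shp", ["shp0", "shp1", "shp2", "shp3", "shp4"])  # feature shapes
@pytest.mark.parametrize("rhs", ["u", "v", "e"])                           # right operand target
@pytest.mark.parametrize("lhs", ["u", "v", "e"])                           # left operand target
@pytest.mark.parametrize("op", ["add", "sub", "mul", "div", "dot", "copy_lhs", "copy_rhs"])
@pytest.mark.parametrize("idtype", ["idtype0", "int64"])                   # graph index dtype
def test_sddmm(idtype, op, lhs, rhs, shp, g):
    # The real test builds the graph with the given index dtype, runs the
    # generalized SDDMM kernel, and compares against a dense reference.
    ...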
PASSED [ 71%-73%] tests/compute/test_sparse.py::test_sddmm[int64-sub-v-...], test_sddmm[int64-sub-e-...] and test_sddmm[int64-mul-u-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 49%-51%] tests/compute/test_sparse.py::test_sddmm[idtype0-mul-v-...] and test_sddmm[idtype0-mul-e-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 75%] tests/go/test_pipeline.py::test_graphpred[pna-ogbg-molhiv]
PASSED [ 76%] tests/go/test_pipeline.py::test_graphpred[pna-ogbg-molpcba]
PASSED [ 51%-53%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-u-...] and test_sddmm[idtype0-div-v-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 74%-75%] tests/compute/test_sparse.py::test_sddmm[int64-mul-u-...] and test_sddmm[int64-mul-v-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 75%-78%] tests/compute/test_sparse.py::test_sddmm[int64-mul-e-...] and test_sddmm[int64-div-u-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 53%-56%] tests/compute/test_sparse.py::test_sddmm[idtype0-div-e-...] and test_sddmm[idtype0-dot-u-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 77%] tests/go/test_pipeline.py::test_recipe[graphpred_hiv_gin.yaml]
PASSED [ 79%] tests/go/test_pipeline.py::test_recipe[graphpred_hiv_pna.yaml]
PASSED [ 80%] tests/go/test_pipeline.py::test_recipe[graphpred_pcba_gin.yaml]
PASSED [ 81%] tests/go/test_pipeline.py::test_recipe[linkpred_cora_sage.yaml]
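test_sddmm exercises DGL's generalized SDDMM (sampled dense-dense matrix multiplication) kernels: for every edge, a binary operator combines two operands drawn from source node (u), destination node (v), or edge (e) features, and copy_lhs/copy_rhs are the degenerate cases that simply gather one operand onto the edges. A minimal usage sketch, assuming a DGL build that ships the dgl.ops namespace with PyTorch as the backend (graph and shapes are illustrative):

```python
import dgl
import torch

# Toy graph: 3 nodes, 4 directed edges.
g = dgl.graph(([0, 1, 2, 0], [1, 2, 0, 2]))

x = torch.randn(3, 8)  # features on source nodes (u)
y = torch.randn(3, 8)  # features on destination nodes (v)

# op=mul, lhs=u, rhs=v: elementwise product of each edge's
# source and destination features; one row per edge.
e_mul = dgl.ops.u_mul_v(g, x, y)   # shape (4, 8)

# op=dot reduces the feature dimension to a single score per edge.
e_dot = dgl.ops.u_dot_v(g, x, y)   # shape (4, 1)
```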
PASSED [ 78%-81%] tests/compute/test_sparse.py::test_sddmm[int64-div-v-...], test_sddmm[int64-div-e-...] and test_sddmm[int64-dot-u-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 56%-58%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-u-...], test_sddmm[idtype0-dot-v-...] and test_sddmm[idtype0-dot-e-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 82%] tests/go/test_pipeline.py::test_recipe[linkpred_citation2_sage.yaml]
PASSED [ 83%] tests/go/test_pipeline.py::test_recipe[linkpred_collab_sage.yaml]
PASSED [ 85%] tests/go/test_pipeline.py::test_recipe[nodepred_citeseer_gat.yaml]
PASSED [ 86%] tests/go/test_pipeline.py::test_recipe[nodepred_citeseer_gcn.yaml]
PASSED [ 87%] tests/go/test_pipeline.py::test_recipe[nodepred_citeseer_sage.yaml]
PASSED [ 58%-61%] tests/compute/test_sparse.py::test_sddmm[idtype0-dot-e-...], test_sddmm[idtype0-copy_lhs-u-...] and test_sddmm[idtype0-copy_lhs-v-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 81%-83%] tests/compute/test_sparse.py::test_sddmm[int64-dot-u-...], test_sddmm[int64-dot-v-...] and test_sddmm[int64-dot-e-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 88%] tests/go/test_pipeline.py::test_recipe[nodepred_cora_gat.yaml]
PASSED [ 90%] tests/go/test_pipeline.py::test_recipe[nodepred_cora_gcn.yaml]
PASSED [ 91%] tests/go/test_pipeline.py::test_recipe[nodepred_cora_sage.yaml]
PASSED [ 83%-86%] tests/compute/test_sparse.py::test_sddmm[int64-dot-e-...], test_sddmm[int64-copy_lhs-u-...] and test_sddmm[int64-copy_lhs-v-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 61%-63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_lhs-e-...] and test_sddmm[idtype0-copy_rhs-u-...] (targets u/v/e; shapes shp0-shp4; graphs g0/g1)
PASSED [ 92%] tests/go/test_pipeline.py::test_recipe[nodepred_pubmed_gat.yaml]
PASSED [ 93%] tests/go/test_pipeline.py::test_recipe[nodepred_pubmed_gcn.yaml]
PASSED [ 95%] tests/go/test_pipeline.py::test_recipe[nodepred_pubmed_sage.yaml]
tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp4-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp3-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-v-e-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp0-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-v-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp0-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp3-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp3-g1] PASSED [ 87%]PASSED [ 96%] tests/go/test_pipeline.py::test_recipe[nodepred-ns_arxiv_gcn.yaml] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp1-g1] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp4-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-u-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp0-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp0-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp1-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp3-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-u-e-shp4-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp3-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp4-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-v-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp0-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp0-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp0-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp1-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp1-g1] PASSED [ 87%] 
tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_lhs-e-e-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp1-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp2-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp2-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp3-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp3-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-u-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp0-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp0-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp1-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp1-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp2-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp2-g0] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp2-g1] PASSED [ 97%] tests/go/test_pipeline.py::test_recipe[nodepred-ns_product_sage.yaml] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp2-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp3-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp3-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp3-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp3-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-v-shp4-g1] PASSED [ 63%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-u-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp2-g0] PASSED [ 64%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp3-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp4-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-v-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp0-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp1-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp2-g1] PASSED [ 98%] tests/go/test_pipeline.py::test_node_cora PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp4-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp0-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp0-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp3-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp1-g0] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp4-g0] PASSED [ 64%]PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp1-g1] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-v-e-shp4-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp0-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp2-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp0-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp3-g1] PASSED [ 64%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp4-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-u-shp4-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp0-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp0-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp1-g0] PASSED [ 89%] 
tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp3-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-v-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp0-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp0-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp2-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp1-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp3-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-u-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp3-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp3-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp0-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-v-e-shp4-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp0-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp0-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp0-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp1-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp1-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp2-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp2-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp3-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-u-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp0-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp1-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp0-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp1-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp1-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp2-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp2-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp3-g0] PASSED [ 91%] 
tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp3-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp4-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-v-shp4-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp0-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp0-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp1-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp1-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp2-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp2-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp3-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp3-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp4-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-e-e-shp4-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int32-shp0-src-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int32-shp0-dst-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp1-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int32-shp1-src-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int32-shp1-dst-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int32-shp2-src-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp2-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int32-shp2-dst-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp0-src-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp0-dst-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp1-src-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp3-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp1-dst-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp2-src-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[int64-shp2-dst-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp3-g1] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[sum] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[max] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp4-g0] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[min] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-v-shp4-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp0-g0] PASSED [ 65%]PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[mean] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp0-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp1-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp1-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp2-g0] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp2-g1] PASSED [ 65%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp3-g0] PASSED [ 65%] 
tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype0-copy_rhs-e-e-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp0-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp1-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp1-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp2-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp3-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp4-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-u-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp0-g1] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-int64] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-int32] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-int64] SKIPPED [ 93%] 
tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-int64] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-int32] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-int64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_use_libxsmm_switch SKIPPED (Only ...) 
[ 94%]
tests/compute/test_specialization.py::test_v2v_update_all[int32] PASSED [ 66%]
tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp1-g0] PASSED [ 66%]
tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp1-g1] FAILED [100%]

=================================== FAILURES ===================================
_________________________________ test_gcn[g0] _________________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(7,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})

    @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
    def test_gcn(g):
        data_info = {
            'num_nodes': g.num_nodes(),
            'out_size': 7
        }
        node_feat = None
        edge_feat = g.edata['scalar_w']
        # node embedding + not use_edge_weight
        model = GCN(data_info, embed_size=10, use_edge_weight=False)
>       model(g, node_feat)

tests/go/test_model.py:18:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/gcn.py:65: in forward
    h = layer(g, h, edge_weight=edge_weight)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python/dgl/nn/pytorch/conv/graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = 
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-2.6910e-01, -7.8042e-01, -2.7359e-03, -3.2366e-02, 5.7685e-01, -4.6568e-01, 2.5890e-01], ...586e+00, -9.3698e-01, 2.8112e-01, -3.2321e-01, -8.1060e-01, -5.0542e-01, 1.1881e+00]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
______________________________ test_gcn_block[g0] ______________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)

    @pytest.mark.parametrize('g', get_cases(['block-bipartite']))
    def test_gcn_block(g):
        data_info = {
            'in_size': 10,
            'out_size': 7
        }
        blocks = [g]
        node_feat = torch.randn(g.num_src_nodes(), data_info['in_size'])
        edge_feat = torch.abs(torch.randn(g.num_edges()))
        # not use_edge_weight
        model = GCN(data_info, use_edge_weight=False)
>       model.forward_block(blocks, node_feat)

tests/go/test_model.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/gcn.py:75: in forward_block
    h = layer(block, h, edge_weight=edge_weight)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python/dgl/nn/pytorch/conv/graphconv.py:423: in forward
    graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = 
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.1589, 1.8696, 2.4331, -0.3975, -2.2170, -0.2013, -0.0745], [ 1.6567, -1.0327, -0.8260, -0.4221, ....0788, 0.8304], [-0.7473, 0.4214, 0.4989, -0.3357, 0.6086, -1.8190, -1.4260]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
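Both tracebacks above stop inside the same gspmm prologue, which canonicalizes 'sub' into 'add' with a negated right operand and 'div' into 'mul' with a reciprocal one, so only two fused kernels are needed for the four arithmetic ops. As a standalone illustration of that rewrite (a simplified, hypothetical helper for this note, not DGL's actual function):

    def canonicalize(op, rhs_data):
        # Mirror the prologue shown in the traceback: express subtraction and
        # division as addition and multiplication on the right-hand operand.
        if op == 'sub':
            return 'add', -rhs_data
        if op == 'div':
            return 'mul', 1. / rhs_data
        return op, rhs_data

    assert canonicalize('sub', 2.0) == ('add', -2.0)
    assert canonicalize('div', 4.0) == ('mul', 0.25)
    assert canonicalize('copy_lhs', None) == ('copy_lhs', None)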
_________________________________ test_gat[g0] _________________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(2,), dtype=torch.float32), 'ft': Scheme(shap...r': Scheme(shape=(8, 1), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})

    @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
    def test_gat(g):
        data_info = {
            'num_nodes': g.num_nodes(),
            'out_size': 7
        }
        node_feat = None
        # node embedding
        model = GAT(data_info, embed_size=10)
>       model(g, node_feat)

tests/go/test_model.py:63:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/gat.py:77: in forward
    h = self.gat_layers[l](graph, h).flatten(1)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python/dgl/nn/pytorch/conv/gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python/dgl/heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python/dgl/core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python/dgl/ops/sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = 
op = 'add'
lhs_data = tensor([[[-0.6326], [-0.1831], [-0.6531], [ 0.0632], [ 0.3255], [-1.1950]....1747], [ 0.0026], [-0.7224], [-1.0509], [ 0.2529]]], grad_fn=)
rhs_data = tensor([[[-0.3539], [ 0.0378], [ 1.2875], [ 1.1894], [ 1.7702], [ 0.9975]....0768], [-0.6095], [-0.3911], [-0.1075], [ 1.4653]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
______________________________ test_gat_block[g0] ______________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)

    @pytest.mark.parametrize('g', get_cases(['block-bipartite']))
    def test_gat_block(g):
        data_info = {
            'in_size': 10,
            'out_size': 7
        }
        blocks = [g]
        node_feat = torch.randn(g.num_src_nodes(), data_info['in_size'])
        model = GAT(data_info, num_layers=1, heads=[8])
>       model.forward_block(blocks, node_feat)

tests/go/test_model.py:81:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/gat.py:86: in forward_block
    logits = self.gat_layers[-1](blocks[-1], h).mean(1)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python/dgl/nn/pytorch/conv/gatconv.py:303: in forward
    graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
python/dgl/heterograph.py:4458: in apply_edges
    edata = core.invoke_gsddmm(g, func)
python/dgl/core.py:266: in invoke_gsddmm
    z = op(graph, x, y)
python/dgl/ops/sddmm.py:128: in func
    lhs_target=lhs_target, rhs_target=rhs_target)
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = 
op = 'add'
lhs_data = tensor([[[ 0.4385], [-0.1202], [ 0.6981], [ 0.8333], [ 0.7433], [ 0.2976]....4548], [ 0.1070], [ 0.2085], [-0.5366], [ 0.5327]]], grad_fn=)
rhs_data = tensor([[[ 1.7303], [-0.2018], [ 1.4550], [ 0.8330], [ 0.5960], [ 0.8471]....2173], [ 0.3000], [ 0.0604], [ 0.9013], [ 0.8341]]], grad_fn=)
lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
_________________________________ test_gin[g0] _________________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})

    @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
    def test_gin(g):
        data_info = {
            'num_nodes': g.num_nodes(),
            'out_size': 7
        }
        node_feat = None
        # node embedding
        model = GIN(data_info, embed_size=10)
>       model(g, node_feat)

tests/go/test_model.py:93:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/gin.py:57: in forward
    h = self.conv_list[i](graph, h)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python/dgl/nn/pytorch/conv/ginconv.py:147: in forward
    graph.update_all(aggregate_fn, _reducer('m', 'neigh'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = 
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = Parameter containing: tensor([[-1.1347, -0.3137, -1.1602, -0.8782, -0.1786, -0.5248, 0.2633, 0.5876, 0.931...1.1028, 0.5693, 1.6440, -0.4924, -1.3252, -0.2387, -0.2325, -0.4017, 0.2927, -1.5606]], requires_grad=True)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
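Every failure so far raises the identical TypeError from sparse.py:720 or :731, which points at a zero-argument placeholder context manager standing in for autocast on this PyTorch build. A minimal self-contained reproduction of that failure mode (a hypothetical reconstruction whose names merely mimic the error message, not DGL's exact source):

    from contextlib import contextmanager

    @contextmanager
    def empty_context():
        # Stand-in used when amp autocast is unavailable; note that it
        # accepts no keyword arguments.
        yield

    autocast = empty_context  # hypothetical aliasing on an old PyTorch

    try:
        with autocast(enabled=False):  # the call site seen in the tracebacks
            pass
    except TypeError as err:
        print(err)  # empty_context() got an unexpected keyword argument 'enabled'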
________________________________ test_sage[g0] _________________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(7,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})

    @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
    def test_sage(g):
        data_info = {
            'num_nodes': g.num_nodes(),
            'out_size': 7
        }
        node_feat = None
        edge_feat = g.edata['scalar_w']
        # node embedding
        model = GraphSAGE(data_info, embed_size=10)
>       model(g, node_feat)

tests/go/test_model.py:112:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/sage.py:58: in forward
    h = layer(graph, h, edge_feat)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python/dgl/nn/pytorch/conv/sageconv.py:249: in forward
    graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = 
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[-1.0455, 0.1407, -3.6295, -0.0526, -2.7218, -2.7691, 4.4690], [ 0.9919, 0.8407, 1.4997, 0.4836, ....4458, 1.5004], [-0.7763, 2.2160, 0.0870, -0.4196, 0.8794, 2.8239, -1.2195]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
_____________________________ test_sage_block[g0] ______________________________

g = Block(num_src_nodes=6, num_dst_nodes=3, num_edges=3)

    @pytest.mark.parametrize('g', get_cases(['block-bipartite']))
    def test_sage_block(g):
        data_info = {
            'in_size': 10,
            'out_size': 7
        }
        blocks = [g]
        node_feat = torch.randn(g.num_src_nodes(), data_info['in_size'])
        edge_feat = torch.abs(torch.randn(g.num_edges()))
        model = GraphSAGE(data_info, embed_size=-1)
>       model.forward_block(blocks, node_feat)

tests/go/test_model.py:133:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/sage.py:67: in forward_block
    h = layer(block, h, edge_feat)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python/dgl/nn/pytorch/conv/sageconv.py:249: in forward
    graph.update_all(msg_fn, fn.sum('m', 'neigh'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = 
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.5972, 1.5512, 0.4784, 3.6568, 3.2279, -0.6815, 1.2453], [ 2.3165, 2.3200, 2.0049, 1.9059, ....2783, -2.2715], [-0.6206, 0.4980, 1.8651, -0.6186, -0.2495, -0.8035, 1.8651]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
_________________________________ test_sgc[g0] _________________________________

g = Graph(num_nodes=10, num_edges=17,
      ndata_schemes={'h': Scheme(shape=(10,), dtype=torch.float32)}
      edata_schemes={'scalar_w': Scheme(shape=(), dtype=torch.float32)})

    @pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
    def test_sgc(g):
        data_info = {
            'num_nodes': g.num_nodes(),
            'out_size': 7
        }
        node_feat = None
        # node embedding
        model = SGC(data_info, embed_size=10)
>       model(g, node_feat)

tests/go/test_model.py:146:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/sgc.py:46: in forward
    return self.sgc(g, h)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python/dgl/nn/pytorch/conv/sgconv.py:203: in forward
    fn.sum('m', 'h'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:357: in message_passing
    ndata = invoke_gspmm(g, mfunc, rfunc)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:189: in func
    return gspmm(g, 'copy_lhs', reduce_op, x, None)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = 
op = 'copy_lhs', reduce_op = 'sum'
lhs_data = tensor([[ 0.1245, 0.2361, 0.5253, -0.4005, 0.4957, 1.0950, -0.9002, 0.0527, -0.6777, -0.3907], [...55, -0.6413, -0.4899, 0.4337, 0.1490, -0.8998, -0.1092, 0.0496, 0.4883, -0.1608]], grad_fn=)
rhs_data = None

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
_____________________________ test_ogbg_gin[True] ______________________________

virtual_node = True

    @pytest.mark.parametrize('virtual_node', [True, False])
    def test_ogbg_gin(virtual_node):
        # Test for ogbg-mol datasets
        data_info = {
            'name': 'ogbg-molhiv',
            'out_size': 1
        }
        model = OGBGGIN(data_info, embed_size=10, num_layers=2, virtual_node=virtual_node)
        num_nodes = 5
        num_edges = 15
        g1 = dgl.rand_graph(num_nodes, num_edges)
        g2 = dgl.rand_graph(num_nodes, num_edges)
        g = dgl.batch([g1, g2])
        num_nodes = g.num_nodes()
        num_edges = g.num_edges()
        nfeat = torch.zeros(num_nodes, 9).long()
        efeat = torch.zeros(num_edges, 3).long()
>       model(g, nfeat, efeat)

tests/go/test_model.py:196:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/graph_encoder/gin_ogbg.py:91: in forward
    hn = self.conv_layers[layer](graph, hn, he)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python/dgl/nn/pytorch/conv/gineconv.py:94: in forward
    graph.update_all(self.message, fn.sum('m', 'neigh'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:369: in message_passing
    ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:191: in func
    return gspmm(g, 'copy_rhs', reduce_op, None, x)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = 
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([[0.0000, 0.6656, 0.0000, 0.3696, 0.0000, 0.1483, 0.0000, 0.0000, 0.0039, 0.0000], [0.0000, 0.... [0.0000, 0.6656, 0.0000, 0.3696, 0.0000, 0.1483, 0.0000, 0.0000, 0.0039, 0.0000]], grad_fn=)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
_____________________________ test_ogbg_gin[False] _____________________________

virtual_node = False

    @pytest.mark.parametrize('virtual_node', [True, False])
    def test_ogbg_gin(virtual_node):
        # Test for ogbg-mol datasets
        data_info = {
            'name': 'ogbg-molhiv',
            'out_size': 1
        }
        model = OGBGGIN(data_info, embed_size=10, num_layers=2, virtual_node=virtual_node)
        num_nodes = 5
        num_edges = 15
        g1 = dgl.rand_graph(num_nodes, num_edges)
        g2 = dgl.rand_graph(num_nodes, num_edges)
        g = dgl.batch([g1, g2])
        num_nodes = g.num_nodes()
        num_edges = g.num_edges()
        nfeat = torch.zeros(num_nodes, 9).long()
        efeat = torch.zeros(num_edges, 3).long()
>       model(g, nfeat, efeat)

tests/go/test_model.py:196:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/graph_encoder/gin_ogbg.py:91: in forward
    hn = self.conv_layers[layer](graph, hn, he)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
python/dgl/nn/pytorch/conv/gineconv.py:94: in forward
    graph.update_all(self.message, fn.sum('m', 'neigh'))
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:369: in message_passing
    ndata = invoke_gspmm(g, fn.copy_e(msg, msg), rfunc, edata=msgdata)
python/dgl/core.py:332: in invoke_gspmm
    z = op(graph, x)
python/dgl/ops/spmm.py:191: in func
    return gspmm(g, 'copy_rhs', reduce_op, None, x)
python/dgl/ops/spmm.py:77: in gspmm
    lhs_data, rhs_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = 
op = 'copy_rhs', reduce_op = 'sum', lhs_data = None
rhs_data = tensor([[0.0000, 1.4420, 0.0000, 1.7451, 0.0000, 0.4129, 0.0000, 1.5422, 1.2173, 0.2543], [0.0000, 1.... [0.0000, 1.4420, 0.0000, 1.7451, 0.0000, 0.4129, 0.0000, 1.5422, 1.2173, 0.2543]], grad_fn=)

    def gspmm(gidx, op, reduce_op, lhs_data, rhs_data):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, reduce_op, lhs_data, rhs_data)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:720: TypeError
___________________________________ test_pna ___________________________________

    def test_pna():
        # Test for ogbg-mol datasets
        data_info = {
            'name': 'ogbg-molhiv',
            'delta': 1,
            'out_size': 1
        }
        model = PNA(data_info, embed_size=10, num_layers=2)
        num_nodes = 5
        num_edges = 15
        g = dgl.rand_graph(num_nodes, num_edges)
        nfeat = torch.zeros(num_nodes, 9).long()
>       model(g, nfeat)

tests/go/test_model.py:227:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/graph_encoder/pna.py:227: in forward
    hn = conv(graph, hn)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/graph_encoder/pna.py:123: in forward
    g.update_all(fn.copy_u('h', 'm'), self.reduce)
python/dgl/heterograph.py:4895: in update_all
    ndata = core.message_passing(g, message_func, reduce_func, apply_node_func)
python/dgl/core.py:362: in message_passing
    msgdata = invoke_gsddmm(g, mfunc)
python/dgl/core.py:276: in invoke_gsddmm
    z = op(graph, x)
python/dgl/ops/sddmm.py:164: in copy_u
    return gsddmm(g, 'copy_lhs', x, None)
python/dgl/ops/sddmm.py:75: in gsddmm
    g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

gidx = 
op = 'copy_lhs'
lhs_data = tensor([[-1.4288, -0.2643, -0.6708, -0.3757, -0.2398, -0.4878, 1.0679, -0.7565, -1.4319, -1.2884], [...88, -0.2643, -0.6708, -0.3757, -0.2398, -0.4878, 1.0679, -0.7565, -1.4319, -1.2884]], grad_fn=)
rhs_data = None, lhs_target = 'u', rhs_target = 'v'

    def gsddmm(gidx, op, lhs_data, rhs_data, lhs_target='u', rhs_target='v'):
        if op == 'sub':
            op = 'add'
            rhs_data = -rhs_data
        if op == 'div':
            op = 'mul'
            rhs_data = 1. / rhs_data
        args = _cast_if_autocast_enabled(gidx, op, lhs_data, rhs_data, lhs_target, rhs_target)
>       with autocast(enabled=False):
E       TypeError: empty_context() got an unexpected keyword argument 'enabled'

python/dgl/backend/pytorch/sparse.py:731: TypeError
________________________________ test_node_cora ________________________________

    def test_node_cora():
        os.system('dgl configure nodepred --data cora --model gcn')
        os.system('dgl train --cfg nodepred_cora_gcn.yaml')
        assert os.path.exists('results')
>       assert os.path.exists('results/run_0.pth')
E       AssertionError: assert False
E        +  where False = ('results/run_0.pth')
E        +    where  = .exists
E        +      where  = os.path

tests/go/test_pipeline.py:170: AssertionError
----------------------------- Captured stdout call -----------------------------
Configuration file is generated at /root/jenkins/workspace/dgl_PR-4648@3/nodepred_cora_gcn.yaml
Downloading /root/jenkins/workspace/dgl_PR-4648@3/cora_v2.zip from https://data.dgl.ai/dataset/cora_v2.zip...
Extracting file to /root/jenkins/workspace/dgl_PR-4648@3/cora_v2
Finished data loading and preprocessing.
  NumNodes: 2708
  NumEdges: 10556
  NumFeats: 1433
  NumClasses: 7
  NumTrainingSamples: 140
  NumValidationSamples: 500
  NumTestSamples: 1000
Done saving data into cached files.
Run experiment #0
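Given the commit message "fix for pytorch < 1.12", the intended behavior is presumably a fallback context manager that tolerates autocast's keyword arguments on old PyTorch builds. One common shape for such a shim, shown here as a sketch under that assumption rather than as what PR #4648 actually implements:

    import contextlib

    try:
        from torch.cuda.amp import autocast  # the real autocast where available
    except ImportError:
        @contextlib.contextmanager
        def autocast(enabled=True, **kwargs):
            # Accept and ignore amp arguments so call sites such as
            # `with autocast(enabled=False):` keep working on every version.
            yield

With a signature-compatible fallback like this, the `with autocast(enabled=False):` call sites at sparse.py:720 and :731 would no longer raise on environments like this one.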
Run experiment #0 ----------------------------- Captured stderr call ----------------------------- WARNING:root:The OGB package is out of date. Your version is 1.3.3, while the latest version is 1.3.4. WARNING:root:The OGB package is out of date. Your version is 1.3.3, while the latest version is 1.3.4. Traceback (most recent call last): File "/opt/conda/envs/pytorch-ci/bin/dgl", line 33, in sys.exit(load_entry_point('dglgo==0.0.2', 'console_scripts', 'dgl')()) File "/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/cli/cli.py", line 21, in main app() File "/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/cli/train_cli.py", line 20, in train exec(code, {'__name__': '__main__'}) File "dglgo_tmp.py", line 248, in File "dglgo_tmp.py", line 192, in main File "dglgo_tmp.py", line 156, in train File "/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl return forward_call(*input, **kwargs) File "dglgo_tmp.py", line 78, in forward File "/opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl return forward_call(*input, **kwargs) File "/root/jenkins/workspace/dgl_PR-4648@3/python/dgl/nn/pytorch/conv/graphconv.py", line 423, in forward graph.update_all(aggregate_fn, fn.sum(msg='m', out='h')) File "/root/jenkins/workspace/dgl_PR-4648@3/python/dgl/heterograph.py", line 4895, in update_all ndata = core.message_passing(g, message_func, reduce_func, apply_node_func) File "/root/jenkins/workspace/dgl_PR-4648@3/python/dgl/core.py", line 357, in message_passing ndata = invoke_gspmm(g, mfunc, rfunc) File "/root/jenkins/workspace/dgl_PR-4648@3/python/dgl/core.py", line 332, in invoke_gspmm z = op(graph, x) File "/root/jenkins/workspace/dgl_PR-4648@3/python/dgl/ops/spmm.py", line 189, in func return gspmm(g, 'copy_lhs', reduce_op, x, None) File "/root/jenkins/workspace/dgl_PR-4648@3/python/dgl/ops/spmm.py", line 77, in gspmm lhs_data, rhs_data) File "/root/jenkins/workspace/dgl_PR-4648@3/python/dgl/backend/pytorch/sparse.py", line 720, in gspmm with autocast(enabled=False): TypeError: empty_context() got an unexpected keyword argument 'enabled' =============================== warnings summary =============================== python/dgl/backend/backend.py:1741 /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/backend/backend.py:1741: DeprecationWarning: invalid escape sequence \P """ python/dgl/backend/pytorch/tensor.py:16 python/dgl/backend/pytorch/tensor.py:16 /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/backend/pytorch/tensor.py:16: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(th.__version__) < LooseVersion("1.9.0"): python/dgl/backend/pytorch/tensor.py:340 python/dgl/backend/pytorch/tensor.py:340 /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/backend/pytorch/tensor.py:340: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(th.__version__) >= LooseVersion("1.10.0"): python/dgl/dataloading/dataloader.py:33 /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/dataloading/dataloader.py:33: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. 
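--------------------------------- analysis ---------------------------------
Every DGL-Go failure above bottoms out in the same statement: "with autocast(enabled=False):" at python/dgl/backend/pytorch/sparse.py:720 (gspmm) and :731 (gsddmm), entered with a stand-in context manager named empty_context that does not accept the enabled keyword. Judging from the branch commit message ("fix for pytorch < 1.12"), the stand-in is what DGL substitutes for torch's autocast on older PyTorch builds. Any message-passing call reaches this code path, so a reproducer needs no DGL-Go at all. A minimal sketch, assuming a PyTorch build old enough to take the fallback path:

    import dgl
    import dgl.function as fn
    import torch

    # Any update_all lowers to the gspmm path shown in the tracebacks above.
    g = dgl.rand_graph(5, 15)          # 5 nodes, 15 random edges
    g.edata['w'] = torch.randn(15, 4)  # edge features
    # copy_e + sum becomes gspmm('copy_rhs', 'sum', ...), which executes
    # `with autocast(enabled=False):` and trips the fallback's TypeError.
    g.update_all(fn.copy_e('w', 'm'), fn.sum('m', 'h'))
-----------------------------------------------------------------------------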
    PYTORCH_VER = LooseVersion(torch.__version__)

python/dgl/_dataloading/pytorch/dataloader.py:23
  /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/_dataloading/pytorch/dataloader.py:23: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_VER = LooseVersion(th.__version__)

python/dgl/_dataloading/pytorch/dataloader.py:24
  /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/_dataloading/pytorch/dataloader.py:24: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_16 = PYTORCH_VER >= LooseVersion("1.6.0")

python/dgl/_dataloading/pytorch/dataloader.py:25
  /root/jenkins/workspace/dgl_PR-4648@3/python/dgl/_dataloading/pytorch/dataloader.py:25: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    PYTORCH_17 = PYTORCH_VER >= LooseVersion("1.7.0")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _nlv = LooseVersion(_np_version)

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p16 = _nlv < LooseVersion("1.16")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p17 = _nlv < LooseVersion("1.17")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p18 = _nlv < LooseVersion("1.18")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p19 = _nlv < LooseVersion("1.19")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p20 = _nlv < LooseVersion("1.20")

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    other = LooseVersion(other)

../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125
../../../../opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(_np_version) >= LooseVersion("1.17.0"):

tests/go/test_model.py::test_gcn[g0]
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/gcn.py:59: DGLWarning: The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.
    dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")

tests/go/test_model.py::test_gat[g0]
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/gat.py:72: DGLWarning: The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.
    "The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")

tests/go/test_model.py::test_gin[g0]
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/gin.py:52: DGLWarning: The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.
    "The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")

tests/go/test_model.py::test_sage[g0]
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/sage.py:52: DGLWarning: The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.
    dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")

tests/go/test_model.py::test_sgc[g0]
  /opt/conda/envs/pytorch-ci/lib/python3.7/site-packages/dglgo-0.0.2-py3.7.egg/dglgo/model/node_encoder/sgc.py:42: DGLWarning: The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.
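--------------------------------- analysis ---------------------------------
The repeated "DeprecationWarning: distutils Version classes are deprecated" entries above all come from LooseVersion comparisons and are independent of the failures, but the warning names its own fix. A sketch of the packaging.version replacement for the pattern in python/dgl/backend/pytorch/tensor.py, assuming the packaging library is available in the CI environment (it ships alongside pip and setuptools):

    import torch
    from packaging import version  # drop-in for distutils.version.LooseVersion

    # Equivalent of `LooseVersion(th.__version__) < LooseVersion("1.9.0")`,
    # minus the DeprecationWarning; parse() also understands local build
    # tags such as "1.9.0+cu111" that LooseVersion compares awkwardly.
    if version.parse(torch.__version__) < version.parse("1.9.0"):
        print("running the pre-1.9 code path")
-----------------------------------------------------------------------------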
dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.") -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html --- generated xml file: /root/jenkins/workspace/dgl_PR-4648@3/pytest_go.xml ---- ============================ slowest 100 durations ============================= 8.67s call tests/go/test_pipeline.py::test_graphpred[pna-csv] 8.22s call tests/go/test_pipeline.py::test_graphpred[pna-ogbg-molhiv] 8.08s call tests/go/test_pipeline.py::test_graphpred[pna-ogbg-molpcba] 7.81s call tests/go/test_pipeline.py::test_graphpred[gin-ogbg-molpcba] 7.79s call tests/go/test_pipeline.py::test_nodepred_ns_data[citeseer] 7.73s call tests/go/test_pipeline.py::test_graphpred[gin-ogbg-molhiv] 7.56s call tests/go/test_pipeline.py::test_nodepred_data[csv] 7.53s call tests/go/test_pipeline.py::test_nodepred_data[citeseer] 7.44s call tests/go/test_pipeline.py::test_nodepred_model[gat] 7.37s call tests/go/test_pipeline.py::test_nodepred_data[pubmed] 7.32s call tests/go/test_pipeline.py::test_nodepred_ns_data[cora] 7.28s call tests/go/test_pipeline.py::test_graphpred[gin-csv] 6.89s call tests/go/test_pipeline.py::test_nodepred_ns_data[csv] 6.81s call tests/go/test_pipeline.py::test_nodepred_model[sage] 6.80s call tests/go/test_pipeline.py::test_nodepred_ns_data[ogbn-arxiv] 6.70s call tests/go/test_pipeline.py::test_nodepred_ns_model[gat] 6.66s call tests/go/test_pipeline.py::test_nodepred_ns_data[co-buy-computer] 6.62s call tests/go/test_pipeline.py::test_nodepred_ns_data[pubmed] 6.62s call tests/go/test_pipeline.py::test_nodepred_model[gcn] 6.60s call tests/go/test_pipeline.py::test_nodepred_ns_model[gcn] 6.59s call tests/go/test_pipeline.py::test_nodepred_model[gin] 6.59s call tests/go/test_pipeline.py::test_nodepred_data[ogbn-arxiv] 6.55s call tests/go/test_pipeline.py::test_nodepred_data[reddit] 6.55s call tests/go/test_pipeline.py::test_nodepred_ns_data[reddit] 6.54s call tests/go/test_pipeline.py::test_nodepred_data[cora] 6.52s call tests/go/test_pipeline.py::test_nodepred_data[ogbn-products] 6.51s call tests/go/test_pipeline.py::test_nodepred_ns_model[sage] 6.50s call tests/go/test_pipeline.py::test_node_cora 6.50s call tests/go/test_pipeline.py::test_nodepred_model[sgc] 6.47s call tests/go/test_pipeline.py::test_nodepred_data[co-buy-computer] 6.47s call tests/go/test_pipeline.py::test_nodepred_ns_data[ogbn-products] 5.51s call tests/go/test_pipeline.py::test_linkpred_edge_model[bilinear] 5.42s call tests/go/test_pipeline.py::test_linkpred_node_model[gin] 5.32s call tests/go/test_pipeline.py::test_linkpred_neg_sampler[global] 5.32s call tests/go/test_pipeline.py::test_linkpred_neg_sampler[persource] 5.26s call tests/go/test_pipeline.py::test_linkpred_node_model[sage] 5.23s call tests/go/test_pipeline.py::test_linkpred_data[ogbl-citation2] 5.22s call tests/go/test_pipeline.py::test_linkpred_neg_sampler[] 5.19s call tests/go/test_pipeline.py::test_linkpred_node_model[sgc] 5.13s call tests/go/test_pipeline.py::test_linkpred_node_model[gat] 5.09s call tests/go/test_pipeline.py::test_linkpred_data[ogbn-products] 5.09s call tests/go/test_pipeline.py::test_linkpred_data[citeseer] 5.08s call tests/go/test_pipeline.py::test_linkpred_data[ogbn-arxiv] 5.07s call tests/go/test_pipeline.py::test_linkpred_data[pubmed] 5.02s call tests/go/test_pipeline.py::test_linkpred_data[reddit] 5.00s call tests/go/test_pipeline.py::test_linkpred_node_model[gcn] 4.99s call tests/go/test_pipeline.py::test_linkpred_data[csv] 4.98s call 
tests/go/test_pipeline.py::test_linkpred_data[cora] 4.96s call tests/go/test_pipeline.py::test_linkpred_edge_model[ele] 4.96s call tests/go/test_pipeline.py::test_linkpred_data[co-buy-computer] 4.91s call tests/go/test_pipeline.py::test_linkpred_data[ogbl-collab] 3.02s call tests/go/test_pipeline.py::test_recipe[nodepred-ns_product_sage.yaml] 2.79s call tests/go/test_pipeline.py::test_recipe[nodepred-ns_arxiv_gcn.yaml] 2.71s call tests/go/test_pipeline.py::test_recipe[nodepred_citeseer_sage.yaml] 2.45s call tests/go/test_pipeline.py::test_recipe[graphpred_pcba_gin.yaml] 2.43s call tests/go/test_pipeline.py::test_recipe[linkpred_collab_sage.yaml] 2.43s call tests/go/test_pipeline.py::test_recipe[nodepred_cora_gcn.yaml] 2.41s call tests/go/test_pipeline.py::test_recipe[nodepred_citeseer_gcn.yaml] 2.41s call tests/go/test_pipeline.py::test_recipe[linkpred_cora_sage.yaml] 2.37s call tests/go/test_pipeline.py::test_recipe[graphpred_hiv_gin.yaml] 2.35s call tests/go/test_pipeline.py::test_recipe[nodepred_citeseer_gat.yaml] 2.35s call tests/go/test_pipeline.py::test_recipe[nodepred_cora_sage.yaml] 2.34s call tests/go/test_pipeline.py::test_recipe[nodepred_cora_gat.yaml] 2.31s call tests/go/test_pipeline.py::test_recipe[nodepred_pubmed_sage.yaml] 2.30s call tests/go/test_pipeline.py::test_recipe[graphpred_hiv_pna.yaml] 2.30s call tests/go/test_pipeline.py::test_recipe[nodepred_pubmed_gat.yaml] 2.29s call tests/go/test_pipeline.py::test_recipe[nodepred_pubmed_gcn.yaml] 2.22s call tests/go/test_pipeline.py::test_recipe[linkpred_citation2_sage.yaml] 0.03s call tests/go/test_model.py::test_gat_block[g0] 0.03s call tests/go/test_model.py::test_gcn_block[g0] 0.03s call tests/go/test_model.py::test_gcn[g0] 0.01s call tests/go/test_model.py::test_ogbg_gin[True] 0.01s call tests/go/test_model.py::test_gat[g0] 0.01s call tests/go/test_model.py::test_bilinear (26 durations < 0.005s hidden. Use -vv to show these durations.) =========================== short test summary info ============================ FAILED tests/go/test_model.py::test_gcn[g0] - TypeError: empty_context() got ... FAILED tests/go/test_model.py::test_gcn_block[g0] - TypeError: empty_context(... FAILED tests/go/test_model.py::test_gat[g0] - TypeError: empty_context() got ... FAILED tests/go/test_model.py::test_gat_block[g0] - TypeError: empty_context(... FAILED tests/go/test_model.py::test_gin[g0] - TypeError: empty_context() got ... FAILED tests/go/test_model.py::test_sage[g0] - TypeError: empty_context() got... FAILED tests/go/test_model.py::test_sage_block[g0] - TypeError: empty_context... FAILED tests/go/test_model.py::test_sgc[g0] - TypeError: empty_context() got ... FAILED tests/go/test_model.py::test_ogbg_gin[True] - TypeError: empty_context... FAILED tests/go/test_model.py::test_ogbg_gin[False] - TypeError: empty_contex... FAILED tests/go/test_model.py::test_pna - TypeError: empty_context() got an u... FAILED tests/go/test_pipeline.py::test_node_cora - AssertionError: assert False ============ 12 failed, 69 passed, 23 warnings in 366.88s (0:06:06) ============ PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp2-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp2-g1] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_update_all[int64] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp3-g0] FAIL: go [Pipeline] } [Pipeline] // timeout [Pipeline] } [Pipeline] // stage Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... 
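--------------------------------- analysis ---------------------------------
Every FAILED entry in the summary above is the same empty_context TypeError; test_node_cora fails only because the training subprocess died on it, as its captured stderr shows. The suite should therefore recover once the autocast fallback accepts the real autocast signature. A sketch of one way the version gate could look; the 1.12 cutoff comes from the branch commit message, and the body is illustrative, not the PR's actual diff of python/dgl/backend/pytorch/sparse.py:

    from contextlib import contextmanager

    import torch
    from packaging import version

    if version.parse(torch.__version__) >= version.parse("1.12"):
        # Recent torch: use the real AMP context manager.
        from torch.cuda.amp import autocast
    else:
        # Older torch: the no-op fallback must accept the same keywords as
        # the real autocast, or `with autocast(enabled=False):` raises
        # "empty_context() got an unexpected keyword argument 'enabled'".
        @contextmanager
        def autocast(enabled=True, **kwargs):
            yield

With a signature-compatible fallback, the gspmm/gsddmm guards run unchanged on every supported torch version, which is what the test_model.py and test_node_cora failures need.
-----------------------------------------------------------------------------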
[WS-CLEANUP] Deferred wipeout is disabled by the job configuration... PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp3-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp4-g0] [WS-CLEANUP] done [Pipeline] } $ docker stop --time=1 39027e0569c91f60d197e31fb8f36454bc8a908bd28bdc225aa7c1adb21f2213 PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-v-shp4-g1] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp0-g0] PASSED [ 66%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp0-g1] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[int32] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp1-g1] $ docker rm -f 39027e0569c91f60d197e31fb8f36454bc8a908bd28bdc225aa7c1adb21f2213 PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp2-g1] [Pipeline] // withDockerContainer [Pipeline] } [Pipeline] // withEnv [Pipeline] } [Pipeline] // node [Pipeline] } [Pipeline] // stage [Pipeline] } Failed in branch DGL-Go PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp3-g1] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[int64] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp4-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-u-e-shp4-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp0-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp2-g0] PASSED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[int32] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp3-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp3-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp4-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-u-shp4-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp0-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp0-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp1-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp1-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp2-g0] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp2-g1] PASSED [ 67%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-v-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp0-g0] PASSED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[int64] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp0-g1] PASSED [ 68%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp1-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp1-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp2-g0] PASSED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[int32] PASSED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[int64] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp2-g1] PASSED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[int32] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp4-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-v-e-shp4-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp0-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp0-g1] PASSED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[int64] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp1-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp1-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp2-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp2-g1] PASSED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[int32] PASSED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[int64] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp3-g0] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp3-g1] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp4-g0] PASSED [ 95%] tests/compute/test_subgraph.py::test_edge_subgraph PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[int32] SKIPPED (M...) [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[int64] SKIPPED (M...) 
[ 95%] tests/compute/test_subgraph.py::test_subgraph1[int32] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-u-shp4-g1] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph1[int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_in_subgraph[int32] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp0-g0] PASSED [ 95%] tests/compute/test_subgraph.py::test_in_subgraph[int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[int32] PASSED [ 68%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp0-g1] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[int64] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp1-g0] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_message_passing PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[int32] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp2-g0] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[int64] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp2-g1] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_out_subgraph[int32] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp3-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp3-g1] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_out_subgraph[int64] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp4-g0] PASSED [ 95%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device1] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device2] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device0-parent_idx_device3] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device1] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device2] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[child_device1-parent_idx_device3] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[int32-device0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-v-shp4-g1] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[int32-device1] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[int64-device0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[int64-device1] SKIPPED [ 96%] tests/compute/test_transform.py::test_line_graph1 PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[int32] PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[int64] PASSED [ 96%] tests/compute/test_transform.py::test_no_backtracking PASSED [ 96%] tests/compute/test_transform.py::test_reverse[int32] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp0-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp2-g0] PASSED [ 69%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp2-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp3-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp3-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp4-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-add-e-e-shp4-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp0-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp0-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp1-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp1-g1] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp2-g0] PASSED [ 69%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp2-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp3-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp4-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-u-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp0-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp0-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp1-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp1-g1] PASSED [ 96%] tests/compute/test_transform.py::test_reverse[int64] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp2-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp2-g1] PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[int32] PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[int64] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp3-g0] PASSED [ 96%] tests/compute/test_transform.py::test_to_bidirected PASSED [ 96%] tests/compute/test_transform.py::test_add_reverse_edges PASSED [ 96%] tests/compute/test_transform.py::test_simple_graph PASSED [ 96%] tests/compute/test_transform.py::test_khop_graph PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp3-g1] PASSED [ 96%] tests/compute/test_transform.py::test_khop_adj PASSED [ 96%] tests/compute/test_transform.py::test_laplacian_lambda_max PASSED [ 97%] tests/compute/test_transform.py::test_partition_with_halo PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp4-g0] PASSED [ 97%] tests/compute/test_transform.py::test_metis_partition[int32] PASSED [ 97%] tests/compute/test_transform.py::test_metis_partition[int64] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-v-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp0-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp0-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp1-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp1-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp2-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp2-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp3-g0] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp3-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp4-g0] 
PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-u-e-shp4-g1] PASSED [ 70%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp0-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp2-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp2-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp3-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp3-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp4-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-u-shp4-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp0-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp2-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp2-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp3-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp3-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp4-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-v-shp4-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp0-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp0-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp1-g0] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp1-g1] PASSED [ 71%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp2-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp2-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp4-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-v-e-shp4-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp0-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp0-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp1-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp1-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp2-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp2-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp4-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-u-shp4-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp0-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp0-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp1-g0] PASSED [ 72%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp1-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp2-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp2-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp3-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp3-g1] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp4-g0] PASSED [ 72%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-v-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp1-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp2-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp2-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp3-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp3-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp4-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-sub-e-e-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp1-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp2-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp2-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp3-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp3-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp4-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-u-shp4-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp0-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp0-g1] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp1-g0] PASSED [ 73%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp1-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp2-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp3-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp3-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp4-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-v-shp4-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp0-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp0-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp1-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp1-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp2-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp3-g0] PASSED [ 74%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp3-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp4-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-u-e-shp4-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp0-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp0-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp1-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp1-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp2-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp2-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp3-g0] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp3-g1] PASSED [ 74%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp4-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-u-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp1-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp2-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp2-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp3-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp3-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp4-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-v-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp0-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp1-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp2-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp2-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp3-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp3-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp4-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-v-e-shp4-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp0-g0] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_nodes PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp0-g1] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp1-g0] PASSED [ 75%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp1-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp3-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp4-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-u-shp4-g1] PASSED [ 97%] 
tests/compute/test_transform.py::test_compact[int32] PASSED [ 97%] tests/compute/test_transform.py::test_compact[int64] PASSED [ 97%] tests/compute/test_transform.py::test_to_simple[int32] PASSED [ 97%] tests/compute/test_transform.py::test_to_simple[int64] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp0-g0] PASSED [ 97%] tests/compute/test_transform.py::test_to_block[int32] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp0-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp1-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp1-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp3-g1] PASSED [ 97%] tests/compute/test_transform.py::test_to_block[int64] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp4-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-v-shp4-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp0-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp0-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp1-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp1-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp2-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp2-g1] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp3-g0] PASSED [ 76%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-mul-e-e-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp1-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp1-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp2-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp2-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp3-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp3-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp4-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-u-shp4-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp0-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp0-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp1-g0] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp1-g1] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp2-g0] PASSED [ 97%] tests/compute/test_transform.py::test_remove_edges[int32] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp2-g1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_edges[int64] PASSED [ 97%] tests/compute/test_transform.py::test_add_edges[int32] PASSED [ 77%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp3-g0] PASSED [ 97%] tests/compute/test_transform.py::test_add_edges[int64] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[int32] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[int64] PASSED [ 97%] tests/compute/test_transform.py::test_remove_nodes[int32] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp3-g1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_nodes[int64] PASSED [ 97%] tests/compute/test_transform.py::test_add_selfloop[int32] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp4-g0] PASSED [ 97%] tests/compute/test_transform.py::test_add_selfloop[int64] PASSED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[int32] PASSED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[int64] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-v-shp4-g1] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_graph[int32] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_graph[int64] PASSED [ 98%] tests/compute/test_transform.py::test_norm_by_dst[int32] PASSED [ 98%] tests/compute/test_transform.py::test_norm_by_dst[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_self_loop[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_self_loop[int64] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp0-g0] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_to_simple[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_to_simple[int64] PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp0-g1] PASSED [ 98%] tests/compute/test_transform.py::test_module_line_graph[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_line_graph[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_khop_graph[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_khop_graph[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[int64] PASSED [ 98%]PASSED [ 77%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp1-g0] tests/compute/test_transform.py::test_module_gcnnorm[int32] PASSED [ 98%] tests/compute/test_transform.py::test_module_gcnnorm[int64] PASSED [ 98%] tests/compute/test_transform.py::test_module_ppr[int32] SKIPPED (Onl...) [ 98%] tests/compute/test_transform.py::test_module_ppr[int64] SKIPPED (Onl...) [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[int32] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[int64] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_gdc[int32] SKIPPED (Onl...) [ 99%] tests/compute/test_transform.py::test_module_gdc[int64] SKIPPED (Onl...) 
[ 99%] tests/compute/test_transform.py::test_module_node_shuffle[int32] PASSED [ 99%] tests/compute/test_transform.py::test_module_node_shuffle[int64] PASSED [ 99%] tests/compute/test_transform.py::test_module_drop_node[int32] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_node[int64] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_edge[int32] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_edge[int64] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_add_edge[int32] PASSED [ 99%] tests/compute/test_transform.py::test_module_add_edge[int64] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[int32] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[int64] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[int32] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[int64] PASSED [ 99%] tests/compute/test_transform.py::test_module_sign[g0] SKIPPED (Only ...) [ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[int32] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[int64] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[int32] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[int64] SKIPPED [ 99%] tests/compute/test_traversal.py::test_bfs[int32] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp2-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp3-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp3-g1] PASSED [ 99%] tests/compute/test_traversal.py::test_bfs[int64] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp4-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-u-e-shp4-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp0-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp0-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp1-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp1-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp2-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp2-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp3-g0] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp3-g1] PASSED [ 78%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp4-g0] PASSED [ 99%] tests/compute/test_traversal.py::test_topological_nodes[int32] PASSED [ 99%] tests/compute/test_traversal.py::test_topological_nodes[int64] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[int32] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[int64] PASSED [100%] =============================== warnings summary =============================== python/dgl/backend/backend.py:1741 /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/backend/backend.py:1741: DeprecationWarning: invalid escape sequence \P """ tests/compute/test_basics.py: 2 warnings tests/compute/test_filter.py: 1 warning tests/compute/test_graph.py: 9 warnings tests/compute/test_kernel.py: 3 warnings tests/compute/test_propagate.py: 2 warnings tests/compute/test_removal.py: 16 warnings 
tests/compute/test_serialize.py: 3 warnings tests/compute/test_specialization.py: 12 warnings tests/compute/test_subgraph.py: 2 warnings tests/compute/test_transform.py: 10 warnings tests/compute/test_traversal.py: 2 warnings /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`. dgl_warning('Recommend creating graphs by `dgl.graph(data)`' tests/compute/test_basics.py: 2 warnings tests/compute/test_batched_graph.py: 10 warnings tests/compute/test_graph.py: 2 warnings tests/compute/test_kernel.py: 1 warning tests/compute/test_propagate.py: 2 warnings tests/compute/test_removal.py: 10 warnings tests/compute/test_specialization.py: 10 warnings tests/compute/test_subgraph.py: 2 warnings /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph.py:354: DGLWarning: DGLGraph.add_edge is deprecated. Please use DGLGraph.add_edges dgl_warning("DGLGraph.add_edge is deprecated. Please use DGLGraph.add_edges") tests/compute/test_basics.py::test_update_all_0deg[int32] tests/compute/test_basics.py::test_update_all_0deg[int64] tests/compute/test_basics.py::test_pull_0deg[int32] tests/compute/test_basics.py::test_pull_0deg[int64] tests/compute/test_propagate.py::test_prop_nodes_topo[int32] tests/compute/test_propagate.py::test_prop_nodes_topo[int64] /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/core.py:79: DGLWarning: The input graph for the user-defined edge function does not contain valid edges dgl_warning('The input graph for the user-defined edge function ' \ tests/compute/test_batched_graph.py::test_batched_edge_ordering[int32] tests/compute/test_batched_graph.py::test_batched_edge_ordering[int64] tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query tests/compute/test_transform.py::test_no_backtracking tests/compute/test_transform.py::test_reverse[int32] tests/compute/test_transform.py::test_reverse[int64] /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph.py:2978: DGLWarning: DGLGraph.edge_id is deprecated. Please use DGLGraph.edge_ids. dgl_warning("DGLGraph.edge_id is deprecated. Please use DGLGraph.edge_ids.") tests/compute/test_batched_heterograph.py::test_features[int32] tests/compute/test_batched_heterograph.py::test_features[int64] /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/batch.py:159: DGLWarning: Arguments edge_attrs has been deprecated. Please use edata instead. dgl_warning('Arguments edge_attrs has been deprecated. Please use' tests/compute/test_csrmm.py::test_csrmm[float32-int32] tests/compute/test_csrmm.py::test_csrmm[float32-int64] tests/compute/test_csrmm.py::test_csrmm[float64-int32] tests/compute/test_csrmm.py::test_csrmm[float64-int64] tests/compute/test_csrmm.py::test_csrsum[float32-int32] tests/compute/test_csrmm.py::test_csrsum[float32-int64] tests/compute/test_csrmm.py::test_csrsum[float64-int32] tests/compute/test_csrmm.py::test_csrsum[float64-int64] /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph_index.py:797: FutureWarning: Adjacency matrix by default currently returns edge IDs. As a result there is one 0 entry which is not eliminated. In the next release it will return 1s by default, and 0 will be eliminated otherwise. 
FutureWarning) tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query tests/compute/test_heterograph.py::test_query[int32] tests/compute/test_heterograph.py::test_query[int64] /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph.py:2753: DGLWarning: DGLGraph.has_node is deprecated. Please use DGLGraph.has_nodes dgl_warning("DGLGraph.has_node is deprecated. Please use DGLGraph.has_nodes") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph.py:2687: DGLWarning: DGLGraph.__contains__ is deprecated. Please directly call has_nodes. dgl_warning('DGLGraph.__contains__ is deprecated.' tests/compute/test_graph.py::test_query tests/compute/test_sampling.py::test_non_uniform_random_walk[False] tests/compute/test_sampling.py::test_uniform_random_walk[False] tests/compute/test_sampling.py::test_node2vec tests/compute/test_transform.py::test_no_backtracking /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph.py:2851: DGLWarning: DGLGraph.has_edge_between is deprecated. Please use DGLGraph.has_edges_between dgl_warning("DGLGraph.has_edge_between is deprecated. " tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph.py:3432: DGLWarning: DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees dgl_warning("DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph.py:3516: DGLWarning: DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees dgl_warning("DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees") tests/compute/test_graph.py::test_query /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly', 'sort_csr'] are deprecated in v0.5, and can be safely removed in all cases. ' removed in all cases.'.format(list(deprecate_kwargs.keys()))) tests/compute/test_heterograph.py: 20 warnings /root/jenkins/workspace/dgl_PR-4648@4/tests/compute/test_heterograph.py:1128: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead assert np.asscalar(F.asnumpy(src_i)) == nid[src[i]] tests/compute/test_heterograph.py: 20 warnings /root/jenkins/workspace/dgl_PR-4648@4/tests/compute/test_heterograph.py:1129: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead assert np.asscalar(F.asnumpy(dst_i)) == nid[dst[i]] tests/compute/test_heterograph.py::test_invertible_conversion[int32] tests/compute/test_heterograph.py::test_invertible_conversion[int64] tests/compute/test_shared_mem.py::test_single_process[int32] tests/compute/test_shared_mem.py::test_single_process[int64] /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph.py:2635: DGLWarning: DGLGraph.is_readonly is deprecated in v0.5. DGLGraph now always supports mutable operations like add_nodes and add_edges. dgl_warning('DGLGraph.is_readonly is deprecated in v0.5.\n' tests/compute/test_kernel.py: 3 warnings tests/compute/test_sparse.py: 290 warnings /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/backend/mxnet/sparse.py:17: DGLWarning: MXNet do not support scatter_add, fallback to numpy. 
dgl_warning("MXNet do not support scatter_add, fallback to numpy.") tests/compute/test_pickle.py::test_pickling_batched_heterograph /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/batch.py:511: DGLWarning: From v0.5, DGLHeteroGraph is merged into DGLGraph. You can safely replace dgl.batch_hetero with dgl.batch dgl_warning('From v0.5, DGLHeteroGraph is merged into DGLGraph. You can safely' tests/compute/test_sampler.py::test_create_full tests/compute/test_sampler.py::test_1neighbor_sampler_all tests/compute/test_sampler.py::test_1neighbor_sampler tests/compute/test_sampler.py::test_prefetch_neighbor_sampler tests/compute/test_sampler.py::test_10neighbor_sampler_all tests/compute/test_sampler.py::test_10neighbor_sampler tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler tests/compute/test_sampler.py::test_setseed /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/contrib/sampling/sampler.py:317: DGLWarning: dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5. Please read our guide for how to use the new sampling APIs. dgl_warning('dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5.' tests/compute/test_sampler.py::test_create_full tests/compute/test_sampler.py::test_1neighbor_sampler_all tests/compute/test_sampler.py::test_1neighbor_sampler tests/compute/test_sampler.py::test_prefetch_neighbor_sampler tests/compute/test_sampler.py::test_10neighbor_sampler_all tests/compute/test_sampler.py::test_10neighbor_sampler tests/compute/test_sampler.py::test_layer_sampler tests/compute/test_sampler.py::test_nonuniform_neighbor_sampler tests/compute/test_sampler.py::test_setseed /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/_deprecate/nodeflow.py:99: DGLWarning: NodeFlow APIs are deprecated starting from v0.5. Please read our guide for how to use the new sampling APIs. dgl_warning('NodeFlow APIs are deprecated starting from v0.5. Please read our' tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64] /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/_ffi/_ctypes/function.py:116: ComplexWarning: Casting complex values to real discards the imaginary part values[i].v_float64 = arg tests/compute/test_serialize.py::test_graph_serialize_with_feature[False] tests/compute/test_serialize.py::test_graph_serialize_without_feature[False] tests/compute/test_serialize.py::test_graph_serialize_with_labels[False] tests/compute/test_transform.py::test_simple_graph /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly'] are deprecated in v0.5, and can be safely removed in all cases. ' removed in all cases.'.format(list(deprecate_kwargs.keys()))) tests/compute/test_serialize.py::test_load_old_files1 tests/compute/test_serialize.py::test_load_old_files2 /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/data/graph_serialize.py:179: DGLWarning: You are loading a graph file saved by old version of dgl. Please consider saving it again with the current format. Please consider saving it again with the current format.") tests/compute/test_transform.py::test_reverse_shared_frames[int32] tests/compute/test_transform.py::test_reverse_shared_frames[int64] /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/transforms/functional.py:1267: DGLWarning: share_ndata argument has been renamed to copy_ndata. 
tests/compute/test_transform.py::test_reverse_shared_frames[int32]
tests/compute/test_transform.py::test_reverse_shared_frames[int64]
  /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/transforms/functional.py:1267: DGLWarning: share_ndata argument has been renamed to copy_ndata.
    dgl_warning('share_ndata argument has been renamed to copy_ndata.')

tests/compute/test_transform.py::test_reverse_shared_frames[int32]
tests/compute/test_transform.py::test_reverse_shared_frames[int64]
  /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/transforms/functional.py:1270: DGLWarning: share_edata argument has been renamed to copy_edata.
    dgl_warning('share_edata argument has been renamed to copy_edata.')

tests/compute/test_transform.py::test_simple_graph
  /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/transforms/functional.py:1319: DGLWarning: dgl.to_simple_graph is renamed to dgl.to_simple in v0.5.
    dgl_warning('dgl.to_simple_graph is renamed to dgl.to_simple in v0.5.')

-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
- generated xml file: /root/jenkins/workspace/dgl_PR-4648@4/pytest_compute.xml -
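Likewise for the transform renames, a sketch under the same toy-graph assumption:

    import dgl

    g = dgl.graph(([0, 0, 1], [1, 1, 2]))   # toy graph with a duplicated 0->1 edge

    # share_ndata / share_edata -> copy_ndata / copy_edata
    rg = dgl.reverse(g, copy_ndata=True, copy_edata=True)

    # dgl.to_simple_graph(g) -> dgl.to_simple(g); parallel edges collapse to one
    sg = dgl.to_simple(g)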
============================ slowest 100 durations =============================
133.49s call tests/compute/test_kernel.py::test_all_binary_builtins
62.39s call tests/compute/test_sampler.py::test_negative_sampler
20.05s call tests/compute/test_transform.py::test_metis_partition[int64]
6.88s call tests/compute/test_heterograph.py::test_query[int64]
5.04s call tests/compute/test_kernel.py::test_copy_src_reduce
4.42s call tests/compute/test_sampling.py::test_non_uniform_random_walk[False]
4.36s call tests/compute/test_heterograph.py::test_level2[int64]
4.31s call tests/compute/test_transform.py::test_reorder_nodes
4.04s call tests/compute/test_graph.py::test_query
3.73s call tests/compute/test_kernel.py::test_copy_edge_reduce
3.07s call tests/compute/test_sampling.py::test_sample_neighbors_outedge
2.76s call tests/compute/test_sampling.py::test_uniform_random_walk[False]
2.62s call tests/compute/test_heterograph.py::test_query[int32]
2.58s call tests/compute/test_heterograph.py::test_level2[int32]
2.47s call tests/compute/test_specialization.py::test_v2v_snr[int32]
2.44s call tests/compute/test_heterograph.py::test_updates[int64]
2.36s call tests/compute/test_heterograph.py::test_view1[int32]
2.13s call tests/compute/test_heterograph.py::test_view1[int64]
2.11s call tests/compute/test_sampling.py::test_sample_neighbors_prob
2.09s call tests/compute/test_sampling.py::test_sample_neighbors_noprob
2.08s call tests/compute/test_heterograph.py::test_updates[int32]
2.04s call tests/compute/test_traversal.py::test_bfs[int64]
1.94s call tests/compute/test_specialization.py::test_v2v_update_all[int64]
1.93s call tests/compute/test_transform.py::test_to_block[int32]
1.86s call tests/compute/test_specialization.py::test_v2v_snr[int64]
1.78s call tests/compute/test_transform.py::test_to_block[int64]
1.63s call tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp0-g1]
1.51s call tests/compute/test_sampler.py::test_10neighbor_sampler
1.48s call tests/compute/test_specialization.py::test_pull_multi_fallback[int32]
1.39s call tests/compute/test_traversal.py::test_bfs[int32]
1.25s call tests/compute/test_heterograph.py::test_flatten[int64]
1.21s call tests/compute/test_removal.py::test_node_and_edge_removal[int64]
1.14s call tests/compute/test_specialization.py::test_v2v_pull[int32]
1.06s call tests/compute/test_sampler.py::test_prefetch_neighbor_sampler
0.96s call tests/compute/test_specialization.py::test_v2v_update_all[int32]
0.96s call tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp2-g1]
0.92s call tests/compute/test_sampling.py::test_node2vec
0.92s call tests/compute/test_removal.py::test_node_and_edge_removal[int32]
0.91s call tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp2-g1]
0.90s call tests/compute/test_specialization.py::test_v2v_pull[int64]
0.89s call tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp1-g1]
0.87s call tests/compute/test_batched_graph.py::test_batch_propagate[int64]
0.87s call tests/compute/test_sparse.py::test_spmm[int32-sum-copy_lhs-shp3-g1]
0.85s call tests/compute/test_sampler.py::test_1neighbor_sampler
0.84s call tests/compute/test_sparse.py::test_sddmm[int32-mul-u-v-shp3-g1]
0.76s call tests/compute/test_sampler.py::test_1neighbor_sampler_all
0.76s call tests/compute/test_heterograph.py::test_convert[int32]
0.74s call tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp2-g1]
0.72s call tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp2-g1]
0.69s call tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp5-g1]
0.69s call tests/compute/test_sparse.py::test_spmm[int32-max-div-shp2-g1]
0.69s call tests/compute/test_sparse.py::test_spmm[int32-max-add-shp3-g1]
0.68s call tests/compute/test_transform.py::test_reverse[int32]
0.68s call tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp0-g1]
0.67s call tests/compute/test_specialization.py::test_pull_multi_fallback[int64]
0.66s call tests/compute/test_heterograph.py::test_flatten[int32]
0.65s call tests/compute/test_shared_mem.py::test_multi_process[int64]
0.64s call tests/compute/test_propagate.py::test_prop_edges_dfs[int64]
0.64s call tests/compute/test_sparse.py::test_spmm[int64-sum-copy_lhs-shp3-g1]
0.63s call tests/compute/test_sparse.py::test_spmm[int64-sum-mul-shp2-g0]
0.63s call tests/compute/test_sparse.py::test_spmm[int32-max-add-shp2-g1]
0.63s call tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp2-g1]
0.62s call tests/compute/test_sparse.py::test_spmm[int32-max-add-shp4-g1]
0.61s call tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp4-g1]
0.60s call tests/compute/test_sparse.py::test_spmm[int32-min-add-shp4-g1]
0.60s call tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp2-g1]
0.60s call tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp0-g1]
0.60s call tests/compute/test_sparse.py::test_spmm[int32-max-sub-shp1-g1]
0.60s call tests/compute/test_sparse.py::test_spmm[int64-max-sub-shp0-g1]
0.60s call tests/compute/test_sparse.py::test_spmm[int64-sum-add-shp2-g1]
0.59s call tests/compute/test_sparse.py::test_spmm[int32-min-div-shp4-g1]
0.59s call tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp1-g1]
0.59s call tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp2-g1]
0.59s call tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp0-g1]
0.59s call tests/compute/test_sparse.py::test_spmm[int32-max-div-shp0-g1]
0.58s call tests/compute/test_sparse.py::test_spmm[int32-min-div-shp3-g1]
0.58s call tests/compute/test_propagate.py::test_prop_edges_dfs[int32]
0.58s call tests/compute/test_sparse.py::test_spmm[int32-sum-add-shp2-g0]
0.58s call tests/compute/test_sparse.py::test_spmm[int32-max-add-shp1-g1]
0.58s call tests/compute/test_sparse.py::test_spmm[int32-min-add-shp2-g1]
0.58s call tests/compute/test_sparse.py::test_spmm[int64-min-add-shp2-g1]
0.57s call tests/compute/test_sparse.py::test_spmm[int32-max-mul-shp5-g1]
0.57s call tests/compute/test_sparse.py::test_spmm[int64-max-mul-shp1-g1]
0.57s call tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp3-g1]
0.56s call tests/compute/test_heterograph.py::test_convert[int64]
0.56s call tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp4-g1]
0.55s call tests/compute/test_sparse.py::test_spmm[int32-min-mul-shp0-g1]
0.55s call tests/compute/test_sparse.py::test_spmm[int32-sum-mul-shp2-g1]
0.55s call tests/compute/test_sparse.py::test_spmm[int64-min-add-shp5-g1]
0.55s call tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp5-g1]
0.55s call tests/compute/test_sparse.py::test_spmm[int64-max-copy_lhs-shp4-g1]
0.55s call tests/compute/test_sparse.py::test_spmm[int64-min-mul-shp4-g1]
0.55s call tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp5-g1]
0.55s call tests/compute/test_sparse.py::test_spmm[int32-min-sub-shp1-g1]
0.55s call tests/compute/test_sparse.py::test_spmm[int32-max-div-shp1-g1]
0.55s call tests/compute/test_sparse.py::test_spmm[int64-sum-div-shp0-g1]
0.55s call tests/compute/test_sparse.py::test_sddmm[int64-copy_rhs-u-e-shp2-g1]
0.54s call tests/compute/test_batched_graph.py::test_batch_propagate[int32]
0.54s call tests/compute/test_basics.py::test_update_routines[int32]
0.54s call tests/compute/test_batched_graph.py::test_batch_send_and_recv[int32]
========= 2284 passed, 169 skipped, 509 warnings in 684.92s (0:11:24) ==========
Error in sys.excepthook:
Original exception was:
PASSED [ 78%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-u-shp4-g1]
Error in sys.excepthook:
Original exception was:
PASSED [ 78%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp0-g0] PASSED [ 78%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp0-g1] PASSED [ 78%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp1-g0] PASSED [ 78%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp1-g1] PASSED [ 78%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp2-g0] PASSED [ 78%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp2-g1] PASSED [ 78%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp3-g0] PASSED [ 79%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp3-g1] PASSED [ 79%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp4-g0] PASSED [ 79%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-v-shp4-g1] PASSED [ 79%]
tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp0-g0]
============================= test session starts ==============================
platform linux -- Python 3.6.9, pytest-7.0.1, pluggy-1.0.0 -- /opt/conda/envs/mxnet-ci/bin/python3
cachedir: .pytest_cache
rootdir: /root/jenkins/workspace/dgl_PR-4648@4
collecting ...
PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp0-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp1-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp1-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp2-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp2-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp3-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp3-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp4-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-v-e-shp4-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp0-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp0-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp1-g0] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp1-g1] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp2-g0] collected 806 items tests/mxnet/test_geometry.py::test_fps PASSED [ 0%] tests/mxnet/test_nn.py::test_graph_conv[1-int32] PASSED [ 0%] tests/mxnet/test_nn.py::test_graph_conv[1-int64] PASSED [ 0%] tests/mxnet/test_nn.py::test_graph_conv[2-int32] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp2-g1] PASSED [ 0%] tests/mxnet/test_nn.py::test_graph_conv[2-int64] PASSED [ 0%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g0-int32] PASSED [ 0%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g0-int64] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp3-g0] PASSED [ 0%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g1-int32] PASSED [ 0%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g1-int64] PASSED [ 1%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g2-int32] PASSED [ 1%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g2-int64] PASSED [ 1%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g3-int32] PASSED [ 1%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g3-int64] PASSED [ 1%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g4-int32] PASSED [ 1%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g4-int64] PASSED [ 1%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g5-int32] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp3-g1] PASSED [ 1%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g5-int64] PASSED [ 2%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g6-int32] PASSED [ 2%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-none-g6-int64] PASSED [ 2%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g0-int32] PASSED [ 2%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g0-int64] PASSED [ 2%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g1-int32] PASSED [ 2%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g1-int64] PASSED [ 2%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g2-int32] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp4-g0] PASSED [ 2%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g2-int64] PASSED [ 3%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g3-int32] PASSED [ 3%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g3-int64] 
PASSED [ 3%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g4-int32] PASSED [ 3%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g4-int64] PASSED [ 3%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g5-int32] PASSED [ 3%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g5-int64] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-u-shp4-g1] PASSED [ 3%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g6-int32] PASSED [ 3%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-both-g6-int64] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g0-int32] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g0-int64] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g1-int32] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g1-int64] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g2-int32] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g2-int64] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g3-int32] PASSED [ 4%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g3-int64] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g4-int32] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g4-int64] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp0-g0] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g5-int32] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g5-int64] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g6-int32] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-right-g6-int64] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g0-int32] PASSED [ 5%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g0-int64] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g1-int32] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g1-int64] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g2-int32] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g2-int64] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g3-int32] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g3-int64] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g4-int32] PASSED [ 6%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g4-int64] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g5-int32] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g5-int64] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g6-int32] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-True-left-g6-int64] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g0-int32] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g0-int64] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g1-int32] PASSED [ 7%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g1-int64] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g2-int32] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g2-int64] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g3-int32] PASSED [ 
8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g3-int64] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g4-int32] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g4-int64] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g5-int32] PASSED [ 8%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g5-int64] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g6-int32] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-none-g6-int64] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g0-int32] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g0-int64] PASSED [ 79%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp0-g1] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g1-int32] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g1-int64] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g2-int32] PASSED [ 9%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g2-int64] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g3-int32] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g3-int64] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g4-int32] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g4-int64] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g5-int32] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g5-int64] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g6-int32] PASSED [ 10%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-both-g6-int64] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g0-int32] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp1-g0] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g0-int64] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g1-int32] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g1-int64] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g2-int32] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g2-int64] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g3-int32] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp1-g1] PASSED [ 11%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g3-int64] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g4-int32] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g4-int64] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g5-int32] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g5-int64] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g6-int32] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-right-g6-int64] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g0-int32] PASSED [ 12%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g0-int64] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g1-int32] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g1-int64] PASSED [ 13%] 
tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g2-int32] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g2-int64] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g3-int32] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g3-int64] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g4-int32] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp2-g0] PASSED [ 13%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g4-int64] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g5-int32] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g5-int64] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g6-int32] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[1-False-False-left-g6-int64] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g0-int32] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g0-int64] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g1-int32] PASSED [ 14%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g1-int64] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g2-int32] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g2-int64] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g3-int32] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g3-int64] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g4-int32] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g4-int64] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g5-int32] PASSED [ 15%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g5-int64] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g6-int32] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-none-g6-int64] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp2-g1] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g0-int32] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g0-int64] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g1-int32] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g1-int64] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g2-int32] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g2-int64] PASSED [ 16%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g3-int32] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g3-int64] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g4-int32] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g4-int64] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g5-int32] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g5-int64] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g6-int32] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp3-g0] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-both-g6-int64] PASSED [ 17%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g0-int32] PASSED [ 18%] 
tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g0-int64] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g1-int32] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g1-int64] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g2-int32] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g2-int64] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g3-int32] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g3-int64] PASSED [ 18%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g4-int32] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g4-int64] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g5-int32] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g5-int64] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g6-int32] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-right-g6-int64] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g0-int32] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g0-int64] PASSED [ 19%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g1-int32] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g1-int64] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g2-int32] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g2-int64] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g3-int32] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g3-int64] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g4-int32] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g4-int64] PASSED [ 20%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g5-int32] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g5-int64] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g6-int32] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-True-left-g6-int64] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp3-g1] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g0-int32] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g0-int64] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g1-int32] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g1-int64] PASSED [ 21%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g2-int32] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g2-int64] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g3-int32] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g3-int64] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g4-int32] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g4-int64] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g5-int32] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g5-int64] PASSED [ 22%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g6-int32] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-none-g6-int64] PASSED [ 23%] 
tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g0-int32] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g0-int64] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g1-int32] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp4-g0] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g1-int64] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g2-int32] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g2-int64] PASSED [ 23%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g3-int32] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g3-int64] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g4-int32] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g4-int64] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g5-int32] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g5-int64] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g6-int32] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-both-g6-int64] PASSED [ 24%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g0-int32] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g0-int64] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-v-shp4-g1] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g1-int32] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g1-int64] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g2-int32] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g2-int64] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g3-int32] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g3-int64] PASSED [ 25%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g4-int32] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g4-int64] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g5-int32] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g5-int64] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g6-int32] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-right-g6-int64] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g0-int32] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g0-int64] PASSED [ 26%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g1-int32] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp0-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp2-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp3-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp3-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp4-g0] PASSED [ 80%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-div-e-e-shp4-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp0-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp0-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp1-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp1-g1] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp2-g0] PASSED [ 80%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp2-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp3-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp4-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-u-shp4-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp0-g0] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g1-int64] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g2-int32] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g2-int64] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g3-int32] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g3-int64] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g4-int32] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g4-int64] PASSED [ 27%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g5-int32] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g5-int64] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g6-int32] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2[2-False-False-left-g6-int64] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-none-g0-int32] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-none-g0-int64] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-none-g1-int32] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-none-g1-int64] PASSED [ 28%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-both-g0-int32] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-both-g0-int64] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp0-g1] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-both-g1-int32] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-both-g1-int64] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-right-g0-int32] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-right-g0-int64] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-right-g1-int32] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-right-g1-int64] PASSED [ 29%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-none-g0-int32] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-none-g0-int64] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-none-g1-int32] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp1-g0] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-none-g1-int64] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-both-g0-int32] PASSED [ 30%]PASSED [ 81%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp1-g1] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-both-g0-int64] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-both-g1-int32] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-both-g1-int64] PASSED [ 30%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-right-g0-int32] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-right-g0-int64] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-right-g1-int32] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp2-g0] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-False-right-g1-int64] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-none-g0-int32] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-none-g0-int64] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-none-g1-int32] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp2-g1] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-none-g1-int64] PASSED [ 31%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-both-g0-int32] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp3-g0] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-both-g0-int64] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-both-g1-int32] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp3-g1] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-both-g1-int64] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g0] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-right-g0-int32] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-right-g0-int64] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-right-g1-int32] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-True-right-g1-int64] PASSED [ 32%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-none-g0-int32] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-v-shp4-g1] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-none-g0-int64] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-none-g1-int32] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-none-g1-int64] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g0] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-both-g0-int32] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-both-g0-int64] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp0-g1] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-both-g1-int32] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-both-g1-int64] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp1-g0] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-right-g0-int32] PASSED [ 33%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-right-g0-int64] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp1-g1] PASSED [ 34%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-right-g1-int32] PASSED [ 34%] tests/mxnet/test_nn.py::test_graph_conv2_bi[2-False-False-right-g1-int64] 
PASSED [ 34%] tests/mxnet/test_nn.py::test_tagconv[1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp2-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp2-g1] PASSED [ 34%] tests/mxnet/test_nn.py::test_tagconv[2] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp3-g0] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp3-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp4-g0] PASSED [ 34%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g0-int32] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-u-e-shp4-g1] PASSED [ 81%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp0-g0] PASSED [ 34%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g0-int64] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp1-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp1-g1] PASSED [ 34%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g1-int32] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp2-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp3-g0] PASSED [ 34%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g1-int64] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp3-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp4-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-u-shp4-g1] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g2-int32] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp0-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp1-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp1-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp2-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp3-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp3-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp4-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-v-shp4-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp0-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp0-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp1-g0] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g2-int64] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp1-g1] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp2-g0] PASSED [ 82%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp2-g1] PASSED [ 83%]PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g3-int32] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp3-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp3-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp4-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-v-e-shp4-g1] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g3-int64] PASSED [ 83%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp0-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp0-g1] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g4-int32] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp1-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp1-g1] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g4-int64] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp2-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp2-g1] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g5-int32] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp3-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp3-g1] PASSED [ 35%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g5-int64] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp4-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-u-shp4-g1] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g6-int32] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp0-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp0-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp1-g0] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-1-g6-int64] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp1-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp2-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp2-g1] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp3-g0] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g0-int32] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp3-g1] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g0-int64] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp4-g0] PASSED [ 83%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-v-shp4-g1] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g1-int32] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp2-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp2-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp3-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp3-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp4-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-dot-e-e-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp1-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp2-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp2-g1] PASSED [ 84%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp3-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp3-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp4-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-u-shp4-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp0-g0] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp0-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp1-g0] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g1-int64] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp1-g1] PASSED [ 84%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp2-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp2-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp3-g0] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g2-int32] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp3-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp4-g0] PASSED [ 36%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g2-int64] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-v-shp4-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp0-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp0-g1] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g3-int32] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp1-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp1-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp2-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp2-g1] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g3-int64] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp3-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp3-g1] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g4-int32] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp4-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-u-e-shp4-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp0-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp0-g1] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g4-int64] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp1-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp1-g1] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g5-int32] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp2-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp2-g1] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g5-int64] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp3-g0] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp3-g1] PASSED [ 85%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp4-g0] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g6-int32] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-u-shp4-g1] PASSED [ 
86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp0-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp1-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp1-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp2-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp2-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp3-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp3-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-v-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp0-g1] PASSED [ 37%] tests/mxnet/test_nn.py::test_gat_conv[1-20-g6-int64] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp1-g0] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g0-int32] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp1-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp2-g0] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g0-int64] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp2-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp3-g0] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g1-int32] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp3-g1] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g1-int64] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp4-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-v-e-shp4-g1] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp0-g0] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp0-g1] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g2-int32] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp1-g0] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g2-int64] PASSED [ 86%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp1-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp2-g0] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g3-int32] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp2-g1] PASSED [ 38%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g3-int64] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp3-g1] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g4-int32] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp4-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-u-shp4-g1] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g4-int64] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp0-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp0-g1] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g5-int32] PASSED [ 87%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp1-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp1-g1] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g5-int64] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp2-g0] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g6-int32] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp3-g1] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-1-g6-int64] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp4-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-v-shp4-g1] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g0-int32] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp0-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp0-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp1-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp1-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp2-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp2-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp3-g0] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp3-g1] PASSED [ 87%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_lhs-e-e-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp1-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp1-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp2-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp2-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp3-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp3-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp4-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-u-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp0-g0] PASSED [ 39%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g0-int64] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp0-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g0] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g1-int32] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp1-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp2-g0] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g1-int64] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp2-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp3-g0] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g2-int32] PASSED [ 40%] 
tests/mxnet/test_nn.py::test_gat_conv[5-20-g2-int64] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp3-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp4-g0] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g3-int32] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-v-shp4-g1] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g0] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp0-g1] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g3-int64] PASSED [ 88%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp1-g1] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g4-int32] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g0] PASSED [ 40%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g4-int64] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp3-g0] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g5-int32] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp3-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-u-e-shp4-g1] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g5-int64] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g0] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g6-int32] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp0-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g0] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv[5-20-g6-int64] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp1-g1] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g0-int32] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g0] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g0-int64] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g0] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g1-int32] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp3-g1] PASSED [ 41%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g1-int64] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-u-shp4-g1] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g0-int32] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp0-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp0-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp1-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp1-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp2-g0] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp2-g1] PASSED [ 89%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp3-g0] PASSED [ 89%] 
tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp4-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-v-shp4-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g0] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g0-int64] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp0-g1] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g1-int32] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g1-int64] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp1-g1] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g0-int32] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g0] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g0-int64] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp2-g1] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g1-int32] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g0] PASSED [ 42%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g1-int64] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp3-g1] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g0] PASSED [ 43%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g0-int32] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-v-e-shp4-g1] PASSED [ 43%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g0-int64] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g0] PASSED [ 43%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g1-int32] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp0-g1] PASSED [ 43%] tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g1-int64] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp1-g1] PASSED [ 43%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g0-int32] PASSED [ 43%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g0-int64] PASSED [ 43%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g1-int32] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g0] PASSED [ 43%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g1-int64] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp2-g1] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g2-int32] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g2-int64] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g0] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp3-g1] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g3-int32] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g3-int64] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g0] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g4-int32] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g4-int64] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-u-shp4-g1] PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g5-int32] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g0] 
PASSED [ 44%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g5-int64] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g6-int32] PASSED [ 90%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp0-g1] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g6-int64] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g7-int32] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g0] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-mean-g7-int64] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp1-g1] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g0-int32] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g0-int64] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g1-int32] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g0] PASSED [ 45%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g1-int64] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g2-int32] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp2-g1] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g2-int64] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g3-int32] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g3-int64] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g0] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g4-int32] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g4-int64] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp3-g1] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g5-int32] PASSED [ 46%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g5-int64] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g6-int32] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g0] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g6-int64] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g7-int32] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-pool-g7-int64] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-v-shp4-g1] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g0-int32] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g0-int64] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g1-int32] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp0-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp0-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp1-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp1-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp2-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp2-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp3-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp3-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp4-g0] PASSED [ 91%] tests/compute/test_sparse.py::test_sddmm[idtype1-copy_rhs-e-e-shp4-g1] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-src-g0] PASSED [ 47%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g1-int64] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g2-int32] PASSED [ 48%] 
tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g2-int64] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g3-int32] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g3-int64] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g4-int32] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp0-dst-g0] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g4-int64] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g5-int32] PASSED [ 48%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g5-int64] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g6-int32] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g6-int64] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g7-int32] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-src-g0] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[1-gcn-g7-int64] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g0-int32] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g0-int64] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g1-int32] PASSED [ 49%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g1-int64] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp1-dst-g0] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g2-int32] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g2-int64] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g3-int32] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g3-int64] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g4-int32] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g4-int64] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g5-int32] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-src-g0] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g5-int64] PASSED [ 50%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g6-int32] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g6-int64] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g7-int32] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype0-shp2-dst-g0] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-mean-g7-int64] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g0-int32] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g0-int64] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g1-int32] PASSED [ 91%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-src-g0] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g1-int64] PASSED [ 51%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g2-int32] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g2-int64] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp0-dst-g0] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g3-int32] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g3-int64] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g4-int32] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g4-int64] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g5-int32] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-src-g0] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g5-int64] PASSED [ 52%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g6-int32] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g6-int64] PASSED [ 92%] 
tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp1-dst-g0] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g7-int32] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-pool-g7-int64] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g0-int32] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g0-int64] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g1-int32] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-src-g0] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g1-int64] PASSED [ 53%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g2-int32] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g2-int64] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g3-int32] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g3-int64] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g4-int32] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g4-int64] PASSED [ 92%] tests/compute/test_sparse.py::test_edge_softmax[idtype1-shp2-dst-g0] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g5-int32] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g5-int64] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[sum] PASSED [ 54%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g6-int32] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g6-int64] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g7-int32] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv[10-gcn-g7-int64] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g0-int32] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g0-int64] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g1-int32] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[max] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g1-int64] PASSED [ 55%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g2-int32] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[min] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-mean-g2-int64] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g0-int32] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_reduce[mean] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g0-int64] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g1-int32] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g1-int64] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g2-int32] PASSED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-1-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-8-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-16-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-64-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype0-0.01-256-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-idtype0] SKIPPED [ 92%] 
tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-1-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-idtype0] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-8-idtype1] SKIPPED [ 92%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-16-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-64-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype1-0.01-256-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-1-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-8-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-16-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-64-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype2-0.003-256-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-1-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-8-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-16-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-64-idtype1] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-idtype0] SKIPPED [ 93%] tests/compute/test_sparse.py::test_segment_mm[dtype3-0.0001-256-idtype1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype0-0.01-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype1-0.02-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-16] SKIPPED [ 94%] 
tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype2-0.003-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-1] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-8] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-16] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-64] SKIPPED [ 94%] tests/compute/test_sparse.py::test_gather_mm_idx_b[dtype3-0.0001-256] SKIPPED [ 94%] tests/compute/test_sparse.py::test_use_libxsmm_switch SKIPPED (Only ...) [ 94%] tests/compute/test_specialization.py::test_v2v_update_all[idtype0] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-pool-g2-int64] PASSED [ 56%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g0-int32] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g0-int64] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g1-int32] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g1-int64] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g2-int32] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[1-gcn-g2-int64] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g0-int32] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_update_all[idtype1] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g0-int64] PASSED [ 57%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g1-int32] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g1-int64] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g2-int32] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-mean-g2-int64] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g0-int32] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g0-int64] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g1-int32] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g1-int64] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[idtype0] PASSED [ 58%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g2-int32] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-pool-g2-int64] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g0-int32] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g0-int64] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g1-int32] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g1-int64] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g2-int32] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi[2-gcn-g2-int64] PASSED [ 59%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-mean-int32] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-mean-int64] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-pool-int32] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-pool-int64] PASSED [ 94%] tests/compute/test_specialization.py::test_v2v_snr[idtype1] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-gcn-int32] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[1-gcn-int64] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[2-mean-int32] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[2-mean-int64] PASSED [ 60%] tests/mxnet/test_nn.py::test_sage_conv_bi2[2-pool-int32] PASSED [ 61%] tests/mxnet/test_nn.py::test_sage_conv_bi2[2-pool-int64] PASSED [ 61%] tests/mxnet/test_nn.py::test_sage_conv_bi2[2-gcn-int32] 
PASSED [ 61%] tests/mxnet/test_nn.py::test_sage_conv_bi2[2-gcn-int64] PASSED [ 61%] tests/mxnet/test_nn.py::test_gg_conv PASSED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[idtype0] PASSED [ 61%] tests/mxnet/test_nn.py::test_cheb_conv[1] PASSED [ 61%] tests/mxnet/test_nn.py::test_cheb_conv[20] PASSED [ 95%] tests/compute/test_specialization.py::test_v2v_pull[idtype1] PASSED [ 61%] tests/mxnet/test_nn.py::test_agnn_conv[g0-int32] PASSED [ 61%] tests/mxnet/test_nn.py::test_agnn_conv[g0-int64] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g1-int32] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g1-int64] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g2-int32] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g2-int64] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g3-int32] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g3-int64] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g4-int32] PASSED [ 62%] tests/mxnet/test_nn.py::test_agnn_conv[g4-int64] PASSED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype0] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv[g5-int32] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv[g5-int64] PASSED [ 95%] tests/compute/test_specialization.py::test_update_all_multi_fallback[idtype1] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv[g6-int32] PASSED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[idtype0] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv[g6-int64] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv_bi[g0-int32] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv_bi[g0-int64] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv_bi[g1-int32] PASSED [ 63%] tests/mxnet/test_nn.py::test_agnn_conv_bi[g1-int64] PASSED [ 64%] tests/mxnet/test_nn.py::test_appnp_conv PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_cheb_conv[1] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_cheb_conv[2] PASSED [ 95%] tests/compute/test_specialization.py::test_pull_multi_fallback[idtype1] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-both-int32] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-both-int64] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-right-int32] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-right-int64] PASSED [ 64%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-none-int32] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-none-int64] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-both-int32] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-both-int64] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-right-int32] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-right-int64] PASSED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[idtype0] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-none-int32] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-none-int64] PASSED [ 65%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-both-int32] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-both-int64] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-right-int32] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-right-int64] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-none-int32] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-none-int64] PASSED [ 66%] 
tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-both-int32] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-both-int64] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-right-int32] PASSED [ 95%] tests/compute/test_specialization.py::test_spmv_3d_feat[idtype1] PASSED [ 66%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-right-int64] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-none-int32] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-none-int64] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-both-int32] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-both-int64] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-right-int32] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-right-int64] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-none-int32] PASSED [ 67%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-none-int64] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-both-int32] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-both-int64] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-right-int32] PASSED [ 95%] tests/compute/test_subgraph.py::test_edge_subgraph PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-right-int64] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-none-int32] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[idtype0] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_mask[idtype1] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-none-int64] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-both-int32] PASSED [ 68%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-both-int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph1[idtype0] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-right-int32] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-right-int64] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-none-int32] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[1-g6-none-int64] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-both-int32] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-both-int64] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-right-int32] PASSED [ 69%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-right-int64] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-none-int32] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g0-none-int64] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-both-int32] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-both-int64] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-right-int32] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph1[idtype1] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-right-int64] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-none-int32] PASSED [ 70%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g1-none-int64] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-both-int32] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-both-int64] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-right-int32] PASSED [ 71%] 
tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-right-int64] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-none-int32] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g2-none-int64] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-both-int32] PASSED [ 71%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-both-int64] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-right-int32] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-right-int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_in_subgraph[idtype0] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-none-int32] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g3-none-int64] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-both-int32] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-both-int64] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-right-int32] PASSED [ 72%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-right-int64] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-none-int32] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g4-none-int64] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-both-int32] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-both-int64] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-right-int32] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-right-int64] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-none-int32] PASSED [ 73%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g5-none-int64] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-both-int32] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-both-int64] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-right-int32] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-right-int64] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-none-int32] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_graph_conv[2-g6-none-int64] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g0-int32] PASSED [ 74%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g0-int64] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g1-int32] PASSED [ 95%] tests/compute/test_subgraph.py::test_in_subgraph[idtype1] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g1-int64] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g2-int32] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g2-int64] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g3-int32] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g3-int64] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g4-int32] PASSED [ 75%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g4-int64] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g5-int32] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g5-int64] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g6-int32] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g6-int64] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g7-int32] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g7-int64] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g8-int32] PASSED [ 76%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g8-int64] PASSED [ 77%] 
tests/mxnet/test_nn.py::test_dense_sage_conv[1-g9-int32] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[1-g9-int64] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g0-int32] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g0-int64] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g1-int32] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g1-int64] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g2-int32] PASSED [ 77%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g2-int64] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g3-int32] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g3-int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[idtype0] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g4-int32] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g4-int64] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g5-int32] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g5-int64] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g6-int32] PASSED [ 78%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g6-int64] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g7-int32] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g7-int64] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g8-int32] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g8-int64] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g9-int32] PASSED [ 79%] tests/mxnet/test_nn.py::test_dense_sage_conv[2-g9-int64] PASSED [ 79%] tests/mxnet/test_nn.py::test_edge_conv[1-g0-int32] PASSED [ 79%] tests/mxnet/test_nn.py::test_edge_conv[1-g0-int64] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g1-int32] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g1-int64] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g2-int32] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g2-int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_out_subgraph[idtype1] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g3-int32] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g3-int64] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g4-int32] PASSED [ 80%] tests/mxnet/test_nn.py::test_edge_conv[1-g4-int64] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[1-g5-int32] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[1-g5-int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_subgraph_message_passing PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[1-g6-int32] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[1-g6-int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype0] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[2-g0-int32] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[2-g0-int64] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[2-g1-int32] PASSED [ 81%] tests/mxnet/test_nn.py::test_edge_conv[2-g1-int64] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g2-int32] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g2-int64] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g3-int32] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g3-int64] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g4-int32] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g4-int64] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g5-int32] PASSED [ 82%] tests/mxnet/test_nn.py::test_edge_conv[2-g5-int64] PASSED [ 83%] 
tests/mxnet/test_nn.py::test_edge_conv[2-g6-int32] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv[2-g6-int64] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[1-g0-int32] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[1-g0-int64] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[1-g1-int32] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[1-g1-int64] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[2-g0-int32] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[2-g0-int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype1] PASSED [ 83%] tests/mxnet/test_nn.py::test_edge_conv_bi[2-g1-int32] PASSED [ 84%] tests/mxnet/test_nn.py::test_edge_conv_bi[2-g1-int64] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g0-int32] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g0-int64] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g1-int32] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g1-int64] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g2-int32] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g2-int64] PASSED [ 84%] tests/mxnet/test_nn.py::test_gin_conv[mean-g3-int32] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g3-int64] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g4-int32] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g4-int64] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g5-int32] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g5-int64] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g6-int32] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g6-int64] PASSED [ 85%] tests/mxnet/test_nn.py::test_gin_conv[mean-g7-int32] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[mean-g7-int64] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g0-int32] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g0-int64] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g1-int32] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g1-int64] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g2-int32] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g2-int64] PASSED [ 86%] tests/mxnet/test_nn.py::test_gin_conv[max-g3-int32] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g3-int64] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g4-int32] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g4-int64] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g5-int32] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g5-int64] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g6-int32] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g6-int64] PASSED [ 87%] tests/mxnet/test_nn.py::test_gin_conv[max-g7-int32] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[max-g7-int64] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g0-int32] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g0-int64] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g1-int32] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g1-int64] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g2-int32] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g2-int64] PASSED [ 88%] tests/mxnet/test_nn.py::test_gin_conv[sum-g3-int32] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g3-int64] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g4-int32] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g4-int64] PASSED [ 89%] 
tests/mxnet/test_nn.py::test_gin_conv[sum-g5-int32] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g5-int64] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g6-int32] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g6-int64] PASSED [ 89%] tests/mxnet/test_nn.py::test_gin_conv[sum-g7-int32] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv[sum-g7-int64] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g0-int32] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g0-int64] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g1-int32] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g1-int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype0] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g2-int32] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[mean-g2-int64] PASSED [ 90%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g0-int32] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g0-int64] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g1-int32] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g1-int64] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g2-int32] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[max-g2-int64] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g0-int32] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g0-int64] PASSED [ 91%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g1-int32] PASSED [ 92%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g1-int64] PASSED [ 92%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g2-int32] PASSED [ 92%] tests/mxnet/test_nn.py::test_gin_conv_bi[sum-g2-int64] PASSED [ 92%] tests/mxnet/test_nn.py::test_gmm_conv[g0-int32] PASSED [ 92%] tests/mxnet/test_nn.py::test_gmm_conv[g0-int64] PASSED [ 92%] tests/mxnet/test_nn.py::test_gmm_conv[g1-int32] PASSED [ 92%] tests/mxnet/test_nn.py::test_gmm_conv[g1-int64] PASSED [ 92%] tests/mxnet/test_nn.py::test_gmm_conv[g2-int32] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g2-int64] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g3-int32] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g3-int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype1] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g4-int32] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g4-int64] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g5-int32] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g5-int64] PASSED [ 93%] tests/mxnet/test_nn.py::test_gmm_conv[g6-int32] PASSED [ 94%] tests/mxnet/test_nn.py::test_gmm_conv[g6-int64] PASSED [ 94%] tests/mxnet/test_nn.py::test_gmm_conv_bi[g0-int32] PASSED [ 94%] tests/mxnet/test_nn.py::test_gmm_conv_bi[g0-int64] PASSED [ 94%] tests/mxnet/test_nn.py::test_gmm_conv_bi[g1-int32] PASSED [ 94%] tests/mxnet/test_nn.py::test_gmm_conv_bi[g1-int64] PASSED [ 94%] tests/mxnet/test_nn.py::test_nn_conv[g0-int32] PASSED [ 94%] tests/mxnet/test_nn.py::test_nn_conv[g0-int64] PASSED [ 94%] tests/mxnet/test_nn.py::test_nn_conv[g1-int32] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g1-int64] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g2-int32] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g2-int64] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g3-int32] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g3-int64] PASSED [ 95%] tests/compute/test_subgraph.py::test_subframes[/cpu:0-parent_idx_device0] SKIPPED [ 96%] 
tests/compute/test_subgraph.py::test_subframes[/cpu:0-parent_idx_device1] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[/cpu:0-parent_idx_device2] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[/cpu:0-parent_idx_device3] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[/gpu:0-parent_idx_device0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[/gpu:0-parent_idx_device1] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[/gpu:0-parent_idx_device2] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_subframes[/gpu:0-parent_idx_device3] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype0-/cpu:0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype0-/gpu:0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype1-/cpu:0] SKIPPED [ 96%] tests/compute/test_subgraph.py::test_uva_subgraph[idtype1-/gpu:0] SKIPPED [ 96%] tests/compute/test_transform.py::test_line_graph1 PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[idtype0] PASSED [ 96%] tests/compute/test_transform.py::test_line_graph2[idtype1] PASSED [ 96%] tests/compute/test_transform.py::test_no_backtracking PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g4-int32] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g4-int64] PASSED [ 95%] tests/mxnet/test_nn.py::test_nn_conv[g5-int32] PASSED [ 96%] tests/compute/test_transform.py::test_reverse[idtype0] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv[g5-int64] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv[g6-int32] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv[g6-int64] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv[g7-int32] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv[g7-int64] PASSED [ 96%] tests/compute/test_transform.py::test_reverse[idtype1] PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[idtype0] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv_bi[g0-int32] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv_bi[g0-int64] PASSED [ 96%] tests/mxnet/test_nn.py::test_nn_conv_bi[g1-int32] PASSED [ 97%] tests/mxnet/test_nn.py::test_nn_conv_bi[g1-int64] PASSED [ 97%] tests/mxnet/test_nn.py::test_nn_conv_bi[g2-int32] PASSED [ 97%] tests/mxnet/test_nn.py::test_nn_conv_bi[g2-int64] PASSED [ 97%] tests/mxnet/test_nn.py::test_sg_conv[1] PASSED [ 96%] tests/compute/test_transform.py::test_reverse_shared_frames[idtype1] PASSED [ 97%] tests/mxnet/test_nn.py::test_sg_conv[2] PASSED [ 97%] tests/mxnet/test_nn.py::test_set2set PASSED [ 96%] tests/compute/test_transform.py::test_to_bidirected PASSED [ 96%] tests/compute/test_transform.py::test_add_reverse_edges PASSED [ 96%] tests/compute/test_transform.py::test_simple_graph PASSED [ 96%] tests/compute/test_transform.py::test_khop_graph PASSED [ 97%] tests/mxnet/test_nn.py::test_glob_att_pool PASSED [ 97%] tests/mxnet/test_nn.py::test_simple_pool PASSED [ 98%] tests/mxnet/test_nn.py::test_rgcn[1] PASSED [ 98%] tests/mxnet/test_nn.py::test_rgcn[2] PASSED [ 98%] tests/mxnet/test_nn.py::test_rgcn[8] PASSED [ 98%] tests/mxnet/test_nn.py::test_sequential PASSED [ 98%] tests/mxnet/test_nn.py::test_hetero_conv[sum-int32] PASSED [ 96%] tests/compute/test_transform.py::test_khop_adj PASSED [ 96%] tests/compute/test_transform.py::test_laplacian_lambda_max PASSED [ 97%] tests/compute/test_transform.py::test_partition_with_halo PASSED [ 98%] tests/mxnet/test_nn.py::test_hetero_conv[sum-int64] PASSED [ 97%] tests/compute/test_transform.py::test_metis_partition[idtype0] PASSED [ 
97%] tests/compute/test_transform.py::test_metis_partition[idtype1] PASSED [ 98%] tests/mxnet/test_nn.py::test_hetero_conv[max-int32] PASSED [ 98%] tests/mxnet/test_nn.py::test_hetero_conv[max-int64] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[min-int32] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[min-int64] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_nodes PASSED [ 97%] tests/compute/test_transform.py::test_compact[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_compact[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_to_simple[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_to_simple[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_to_block[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_to_block[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_edges[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_remove_edges[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_add_edges[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_add_edges[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_add_nodes[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_nodes[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_remove_nodes[idtype1] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[mean-int32] PASSED [ 97%] tests/compute/test_transform.py::test_add_selfloop[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_add_selfloop[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_remove_selfloop[idtype1] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_graph[idtype0] PASSED [ 97%] tests/compute/test_transform.py::test_reorder_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_norm_by_dst[idtype0] SKIPPED (...) [ 98%] tests/compute/test_transform.py::test_norm_by_dst[idtype1] SKIPPED (...) 
[ 98%] tests/compute/test_transform.py::test_module_add_self_loop[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_self_loop[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_remove_self_loop[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_reverse[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_to_simple[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_to_simple[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_line_graph[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_line_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_khop_graph[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_khop_graph[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_add_metapaths[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_compose[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_gcnnorm[idtype0] PASSED [ 98%] tests/compute/test_transform.py::test_module_gcnnorm[idtype1] PASSED [ 98%] tests/compute/test_transform.py::test_module_ppr[idtype0] SKIPPED (O...) [ 98%] tests/compute/test_transform.py::test_module_ppr[idtype1] SKIPPED (O...) [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[idtype0] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_heat_kernel[idtype1] SKIPPED [ 98%] tests/compute/test_transform.py::test_module_gdc[idtype0] SKIPPED (O...) [ 99%] tests/compute/test_transform.py::test_module_gdc[idtype1] SKIPPED (O...) [ 99%] tests/compute/test_transform.py::test_module_node_shuffle[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_node_shuffle[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_drop_node[idtype0] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_node[idtype1] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_edge[idtype0] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_drop_edge[idtype1] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_add_edge[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_add_edge[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_random_walk_pe[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[idtype0] PASSED [ 99%] tests/compute/test_transform.py::test_module_laplacian_pe[idtype1] PASSED [ 99%] tests/compute/test_transform.py::test_module_sign[g0] SKIPPED (Only ...) 
[ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[idtype0] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_row_feat_normalizer[idtype1] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[idtype0] SKIPPED [ 99%] tests/compute/test_transform.py::test_module_feat_mask[idtype1] SKIPPED [ 99%] tests/compute/test_traversal.py::test_bfs[idtype0] PASSED [ 99%] tests/compute/test_traversal.py::test_bfs[idtype1] PASSED [ 99%] tests/compute/test_traversal.py::test_topological_nodes[idtype0] PASSED [ 99%] tests/compute/test_traversal.py::test_topological_nodes[idtype1] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[idtype0] PASSED [ 99%] tests/compute/test_traversal.py::test_dfs_labeled_edges[idtype1] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[mean-int64] PASSED [100%]
=============================== warnings summary ===============================
python/dgl/backend/backend.py:1741
  /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/backend/backend.py:1741: DeprecationWarning: invalid escape sequence \P
    """
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/iterator_ops.py:546
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/iterator_ops.py:546: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
    class IteratorBase(collections.Iterator, trackable.Trackable,
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py:106
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py:106: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
    class DatasetV2(collections.Iterable, tracking_base.Trackable,
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/autograph/utils/testing.py:21
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/autograph/utils/testing.py:21: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses
    import imp
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _nlv = LooseVersion(_np_version)
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p16 = _nlv < LooseVersion("1.16")
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p17 = _nlv < LooseVersion("1.17")
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p18 = _nlv < LooseVersion("1.18")
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p19 = _nlv < LooseVersion("1.19")
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    _np_version_under1p20 = _nlv < LooseVersion("1.20")
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    other = LooseVersion(other)
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
    if LooseVersion(_np_version) >= LooseVersion("1.17.0"):
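The collections and distutils deprecations above all have the same mechanical fix; a minimal sketch of both migrations (assumes the third-party packaging package is available, and the numpy import is purely illustrative):

    # Container ABCs now live in collections.abc (the old aliases are removed in 3.10+).
    from collections.abc import Iterable, Iterator  # was: from collections import Iterable, Iterator

    # packaging.version.Version replaces the deprecated distutils LooseVersion.
    from packaging.version import Version  # was: from distutils.version import LooseVersion

    import numpy as np

    # was: _np_version_under1p20 = LooseVersion(np.__version__) < LooseVersion("1.20")
    np_version_under1p20 = Version(np.__version__) < Version("1.20")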
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:23
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:23: DeprecationWarning: NEAREST is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.NEAREST or Dither.NONE instead.
    'nearest': pil_image.NEAREST,
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:24
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:24: DeprecationWarning: BILINEAR is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BILINEAR instead.
    'bilinear': pil_image.BILINEAR,
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:25
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:25: DeprecationWarning: BICUBIC is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BICUBIC instead.
    'bicubic': pil_image.BICUBIC,
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:28
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:28: DeprecationWarning: HAMMING is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.HAMMING instead.
    if hasattr(pil_image, 'HAMMING'):
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:29
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:29: DeprecationWarning: HAMMING is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.HAMMING instead.
    _PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:30
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:30: DeprecationWarning: BOX is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BOX instead.
    if hasattr(pil_image, 'BOX'):
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:31
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:31: DeprecationWarning: BOX is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BOX instead.
    _PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:33
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:33: DeprecationWarning: LANCZOS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead.
    if hasattr(pil_image, 'LANCZOS'):
../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:34
  /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:34: DeprecationWarning: LANCZOS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead.
    _PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
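The Pillow warnings above all point at one change: Pillow 10 drops the module-level filter constants in favour of the Resampling enum introduced in Pillow 9.1. A version-tolerant sketch of the lookup table keras_preprocessing is building here, not the library's actual code (assumes Pillow is installed):

    from PIL import Image

    try:
        # Pillow >= 9.1: filters live on the Resampling enum.
        _PIL_INTERPOLATION_METHODS = {
            'nearest': Image.Resampling.NEAREST,
            'bilinear': Image.Resampling.BILINEAR,
            'bicubic': Image.Resampling.BICUBIC,
            'hamming': Image.Resampling.HAMMING,
            'box': Image.Resampling.BOX,
            'lanczos': Image.Resampling.LANCZOS,
        }
    except AttributeError:
        # Older Pillow: fall back to the module-level constants (removed in Pillow 10).
        _PIL_INTERPOLATION_METHODS = {
            'nearest': Image.NEAREST,
            'bilinear': Image.BILINEAR,
            'bicubic': Image.BICUBIC,
            'hamming': Image.HAMMING,
            'box': Image.BOX,
            'lanczos': Image.LANCZOS,
        }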
if hasattr(pil_image, 'HAMMING'): ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:29 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:29: DeprecationWarning: HAMMING is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.HAMMING instead. _PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:30 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:30: DeprecationWarning: BOX is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BOX instead. if hasattr(pil_image, 'BOX'): ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:31 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:31: DeprecationWarning: BOX is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BOX instead. _PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:33 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:33: DeprecationWarning: LANCZOS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead. if hasattr(pil_image, 'LANCZOS'): ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:34 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:34: DeprecationWarning: LANCZOS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead. _PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS python/dgl/backend/tensorflow/tensor.py:15 python/dgl/backend/tensorflow/tensor.py:15 /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/backend/tensorflow/tensor.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(tf.__version__) < LooseVersion("2.3.0"): tests/compute/test_basics.py: 2 warnings tests/compute/test_filter.py: 1 warning tests/compute/test_graph.py: 9 warnings tests/compute/test_kernel.py: 3 warnings tests/compute/test_propagate.py: 2 warnings tests/compute/test_removal.py: 16 warnings tests/compute/test_serialize.py: 3 warnings tests/compute/test_specialization.py: 12 warnings tests/compute/test_subgraph.py: 2 warnings tests/compute/test_transform.py: 10 warnings tests/compute/test_traversal.py: 2 warnings /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`. dgl_warning('Recommend creating graphs by `dgl.graph(data)`' tests/compute/test_basics.py: 2 warnings tests/compute/test_batched_graph.py: 8 warnings tests/compute/test_graph.py: 2 warnings tests/compute/test_kernel.py: 1 warning tests/compute/test_propagate.py: 2 warnings tests/compute/test_removal.py: 10 warnings tests/compute/test_specialization.py: 10 warnings tests/compute/test_subgraph.py: 2 warnings /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:354: DGLWarning: DGLGraph.add_edge is deprecated. Please use DGLGraph.add_edges dgl_warning("DGLGraph.add_edge is deprecated. 
Please use DGLGraph.add_edges") tests/compute/test_basics.py::test_update_all_0deg[idtype0] tests/compute/test_basics.py::test_update_all_0deg[idtype1] tests/compute/test_basics.py::test_pull_0deg[idtype0] tests/compute/test_basics.py::test_pull_0deg[idtype1] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype0] tests/compute/test_propagate.py::test_prop_nodes_topo[idtype1] /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/core.py:79: DGLWarning: The input graph for the user-defined edge function does not contain valid edges dgl_warning('The input graph for the user-defined edge function ' \ tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype0] tests/compute/test_batched_graph.py::test_batched_edge_ordering[idtype1] tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query tests/compute/test_transform.py::test_no_backtracking tests/compute/test_transform.py::test_reverse[idtype0] tests/compute/test_transform.py::test_reverse[idtype1] /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:2978: DGLWarning: DGLGraph.edge_id is deprecated. Please use DGLGraph.edge_ids. dgl_warning("DGLGraph.edge_id is deprecated. Please use DGLGraph.edge_ids.") tests/compute/test_batched_heterograph.py::test_features[idtype0] tests/compute/test_batched_heterograph.py::test_features[idtype1] /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/batch.py:159: DGLWarning: Arguments edge_attrs has been deprecated. Please use edata instead. dgl_warning('Arguments edge_attrs has been deprecated. Please use' tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype0] tests/compute/test_csrmm.py::test_csrmm[dtype0-idtype1] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype0] tests/compute/test_csrmm.py::test_csrmm[dtype1-idtype1] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype0] tests/compute/test_csrmm.py::test_csrsum[dtype0-idtype1] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype0] tests/compute/test_csrmm.py::test_csrsum[dtype1-idtype1] /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph_index.py:797: FutureWarning: Adjacency matrix by default currently returns edge IDs. As a result there is one 0 entry which is not eliminated. In the next release it will return 1s by default, and 0 will be eliminated otherwise. FutureWarning) tests/compute/test_data.py::test_citation_graph /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/data/citation_graph.py:287: RuntimeWarning: divide by zero encountered in power r_inv = np.power(rowsum, -1).flatten() tests/compute/test_data.py::test_csvdataset /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/numpy/lib/arraysetops.py:565: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison mask &= (ar1 != a) tests/compute/test_data.py::test_csvdataset /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/data/csv_dataset_base.py:298: DGLWarning: Unamed column is found. Ignored... dgl_warning("Unamed column is found. Ignored...") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query tests/compute/test_heterograph.py::test_query[idtype0] tests/compute/test_heterograph.py::test_query[idtype1] /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:2753: DGLWarning: DGLGraph.has_node is deprecated. Please use DGLGraph.has_nodes dgl_warning("DGLGraph.has_node is deprecated. 
Please use DGLGraph.has_nodes") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:2687: DGLWarning: DGLGraph.__contains__ is deprecated. Please directly call has_nodes. dgl_warning('DGLGraph.__contains__ is deprecated.' tests/compute/test_graph.py::test_query tests/compute/test_sampling.py::test_non_uniform_random_walk[False] tests/compute/test_sampling.py::test_uniform_random_walk[False] tests/compute/test_sampling.py::test_node2vec tests/compute/test_transform.py::test_no_backtracking /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:2851: DGLWarning: DGLGraph.has_edge_between is deprecated. Please use DGLGraph.has_edges_between dgl_warning("DGLGraph.has_edge_between is deprecated. " tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:3432: DGLWarning: DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees dgl_warning("DGLGraph.in_degree is deprecated. Please use DGLGraph.in_degrees") tests/compute/test_graph.py::test_query tests/compute/test_graph.py::test_hypersparse_query /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:3516: DGLWarning: DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees dgl_warning("DGLGraph.out_degree is deprecated. Please use DGLGraph.out_degrees") tests/compute/test_graph.py::test_query /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly', 'sort_csr'] are deprecated in v0.5, and can be safely removed in all cases. ' removed in all cases.'.format(list(deprecate_kwargs.keys()))) tests/compute/test_heterograph.py: 20 warnings /root/jenkins/workspace/dgl_PR-4648@2/tests/compute/test_heterograph.py:1128: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead assert np.asscalar(F.asnumpy(src_i)) == nid[src[i]] tests/compute/test_heterograph.py: 20 warnings /root/jenkins/workspace/dgl_PR-4648@2/tests/compute/test_heterograph.py:1129: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead assert np.asscalar(F.asnumpy(dst_i)) == nid[dst[i]] tests/compute/test_heterograph.py::test_invertible_conversion[idtype0] tests/compute/test_heterograph.py::test_invertible_conversion[idtype1] /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:2635: DGLWarning: DGLGraph.is_readonly is deprecated in v0.5. DGLGraph now always supports mutable operations like add_nodes and add_edges. dgl_warning('DGLGraph.is_readonly is deprecated in v0.5.\n' tests/compute/test_pickle.py::test_pickling_batched_heterograph /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/batch.py:511: DGLWarning: From v0.5, DGLHeteroGraph is merged into DGLGraph. You can safely replace dgl.batch_hetero with dgl.batch dgl_warning('From v0.5, DGLHeteroGraph is merged into DGLGraph. 
You can safely' tests/compute/test_sampler.py::test_create_full tests/compute/test_sampler.py::test_1neighbor_sampler_all tests/compute/test_sampler.py::test_1neighbor_sampler tests/compute/test_sampler.py::test_prefetch_neighbor_sampler tests/compute/test_sampler.py::test_10neighbor_sampler_all tests/compute/test_sampler.py::test_10neighbor_sampler tests/compute/test_sampler.py::test_setseed /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/contrib/sampling/sampler.py:317: DGLWarning: dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5. Please read our guide for how to use the new sampling APIs. dgl_warning('dgl.contrib.sampling.NeighborSampler is deprecated starting from v0.5.' tests/compute/test_sampler.py::test_create_full tests/compute/test_sampler.py::test_1neighbor_sampler_all tests/compute/test_sampler.py::test_1neighbor_sampler tests/compute/test_sampler.py::test_prefetch_neighbor_sampler tests/compute/test_sampler.py::test_10neighbor_sampler_all tests/compute/test_sampler.py::test_10neighbor_sampler tests/compute/test_sampler.py::test_layer_sampler tests/compute/test_sampler.py::test_setseed /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/_deprecate/nodeflow.py:99: DGLWarning: NodeFlow APIs are deprecated starting from v0.5. Please read our guide for how to use the new sampling APIs. dgl_warning('NodeFlow APIs are deprecated starting from v0.5. Please read our' tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int32] tests/compute/test_sampling.py::test_global_uniform_negative_sampling[int64] /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/sampling/negative.py:102: ComplexWarning: Casting complex values to real discards the imaginary part g._graph, etype_id, num_samples, 3, exclude_self_loops, replace, redundancy) tests/compute/test_serialize.py::test_graph_serialize_with_feature[False] tests/compute/test_serialize.py::test_graph_serialize_without_feature[False] tests/compute/test_serialize.py::test_graph_serialize_with_labels[False] tests/compute/test_transform.py::test_simple_graph /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly'] are deprecated in v0.5, and can be safely removed in all cases. ' removed in all cases.'.format(list(deprecate_kwargs.keys()))) tests/compute/test_serialize.py::test_load_old_files1 tests/compute/test_serialize.py::test_load_old_files2 /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/data/graph_serialize.py:179: DGLWarning: You are loading a graph file saved by old version of dgl. Please consider saving it again with the current format. Please consider saving it again with the current format.") tests/compute/test_transform.py::test_reverse_shared_frames[idtype0] tests/compute/test_transform.py::test_reverse_shared_frames[idtype1] /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/transforms/functional.py:1267: DGLWarning: share_ndata argument has been renamed to copy_ndata. dgl_warning('share_ndata argument has been renamed to copy_ndata.') tests/compute/test_transform.py::test_reverse_shared_frames[idtype0] tests/compute/test_transform.py::test_reverse_shared_frames[idtype1] /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/transforms/functional.py:1270: DGLWarning: share_edata argument has been renamed to copy_edata. 
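Taken together, the DGLWarnings collected in this summary map onto a small set of v0.5+ renames. A consolidated sketch on a toy graph; the PyTorch backend is assumed here for brevity (this job runs TensorFlow and the parallel one MXNet, but the graph API is backend-agnostic):

    import dgl
    import numpy as np
    import torch

    g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))  # not dgl.DGLGraph(data)
    g.add_edges(torch.tensor([0]), torch.tensor([2]))                  # not g.add_edge(0, 2)

    g.edge_ids(0, 1)           # not g.edge_id(0, 1)
    g.has_nodes([0, 1])        # not g.has_node(0) or `0 in g`
    g.has_edges_between(0, 1)  # not g.has_edge_between(0, 1)
    g.in_degrees([0, 1])       # not g.in_degree(0); likewise out_degrees
    bg = dgl.batch([g, g])     # dgl.batch_hetero is folded into dgl.batch
    rg = dgl.reverse(g, copy_ndata=True, copy_edata=True)  # not share_ndata/share_edata

    # np.asscalar (flagged in test_heterograph.py) is removed in NumPy >= 1.23:
    assert np.asarray([3.0]).item() == 3.0

    # dgl.contrib.sampling.NeighborSampler and the NodeFlow pipeline are
    # superseded by the dgl.sampling / dgl.dataloading APIs, e.g.:
    frontier = dgl.sampling.sample_neighbors(g, torch.tensor([0]), fanout=2)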
dgl_warning('share_edata argument has been renamed to copy_edata.') tests/compute/test_transform.py::test_simple_graph /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/transforms/functional.py:1319: DGLWarning: dgl.to_simple_graph is renamed to dgl.to_simple in v0.5. dgl_warning('dgl.to_simple_graph is renamed to dgl.to_simple in v0.5.') -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html - generated xml file: /root/jenkins/workspace/dgl_PR-4648@2/pytest_compute.xml - ============================ slowest 100 durations ============================= 196.55s call tests/compute/test_kernel.py::test_all_binary_builtins 132.31s call tests/compute/test_data.py::test_reddit 64.08s call tests/compute/test_data.py::test_fakenews 55.00s call tests/compute/test_data.py::test_gin 36.58s call tests/compute/test_data.py::test_as_graphpred 15.67s call tests/compute/test_transform.py::test_metis_partition[idtype1] 11.99s call tests/compute/test_data.py::test_tudataset_regression 11.59s call tests/compute/test_data.py::test_gnn_benchmark 11.27s call tests/compute/test_data.py::test_explain_syn 10.12s call tests/compute/test_sampling.py::test_sample_neighbors_outedge 8.64s call tests/compute/test_kernel.py::test_copy_src_reduce 7.50s call tests/compute/test_data.py::test_as_graphpred_reprocess 6.15s call tests/compute/test_kernel.py::test_copy_edge_reduce 5.85s call tests/compute/test_sampling.py::test_sample_neighbors_prob 5.74s call tests/compute/test_sampling.py::test_sample_neighbors_noprob 5.56s call tests/compute/test_graph.py::test_query 4.70s call tests/compute/test_data.py::test_wiki_cs 3.98s call tests/compute/test_data.py::test_citation_graph 3.95s call tests/compute/test_heterograph.py::test_query[idtype1] 3.94s call tests/compute/test_heterograph.py::test_query[idtype0] 3.60s call tests/compute/test_data.py::test_flickr 3.22s call tests/compute/test_data.py::test_fraud 3.17s call tests/compute/test_data.py::test_add_nodepred_split 2.88s call tests/compute/test_heterograph.py::test_forking_pickler 2.78s call tests/compute/test_heterograph.py::test_updates[idtype0] 2.73s call tests/compute/test_transform.py::test_remove_nodes[idtype0] 2.72s call tests/compute/test_transform.py::test_remove_nodes[idtype1] 2.59s call tests/compute/test_heterograph.py::test_updates[idtype1] 2.49s call tests/compute/test_specialization.py::test_pull_multi_fallback[idtype0] 2.48s call tests/compute/test_specialization.py::test_pull_multi_fallback[idtype1] 2.46s call tests/compute/test_specialization.py::test_spmv_3d_feat[idtype0] 2.42s call tests/compute/test_transform.py::test_partition_with_halo 2.09s call tests/compute/test_transform.py::test_reorder_nodes 2.06s call tests/compute/test_data.py::test_csvdataset 2.03s call tests/compute/test_specialization.py::test_v2v_pull[idtype0] 2.02s call tests/compute/test_specialization.py::test_spmv_3d_feat[idtype1] 2.00s call tests/compute/test_heterograph.py::test_level2[idtype0] 1.99s call tests/compute/test_specialization.py::test_v2v_pull[idtype1] 1.83s call tests/compute/test_heterograph.py::test_level2[idtype1] 1.80s call tests/compute/test_basics.py::test_update_routines[idtype0] 1.80s call tests/compute/test_basics.py::test_update_routines[idtype1] 1.68s call tests/compute/test_propagate.py::test_prop_edges_dfs[idtype1] 1.68s call tests/compute/test_specialization.py::test_v2v_snr[idtype0] 1.66s call tests/compute/test_propagate.py::test_prop_edges_dfs[idtype0] 1.60s call tests/compute/test_specialization.py::test_v2v_snr[idtype1] 1.45s call 
tests/compute/test_data.py::test_minigc 1.45s call tests/compute/test_heterograph.py::test_format[idtype1] 1.43s call tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype1] 1.28s call tests/compute/test_subgraph.py::test_khop_in_subgraph[idtype0] 1.28s call tests/compute/test_transform.py::test_khop_graph 1.25s call tests/compute/test_heterograph.py::test_format[idtype0] 1.17s call tests/compute/test_sampler.py::test_10neighbor_sampler_all 1.14s call tests/compute/test_heterograph.py::test_view1[idtype0] 1.14s call tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype0] 1.14s call tests/compute/test_sampling.py::test_sample_neighbors_topk 1.12s call tests/compute/test_subgraph.py::test_out_subgraph[idtype0] 1.09s call tests/compute/test_subgraph.py::test_khop_out_subgraph[idtype1] 1.08s call tests/compute/test_subgraph.py::test_in_subgraph[idtype1] 1.07s call tests/compute/test_subgraph.py::test_in_subgraph[idtype0] 1.07s call tests/compute/test_heterograph.py::test_view1[idtype1] 1.04s call tests/compute/test_subgraph.py::test_out_subgraph[idtype1] 1.00s call tests/compute/test_specialization.py::test_v2v_update_all[idtype0] 0.99s call tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp2-g1] 0.98s call tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp0-g1] 0.95s call tests/compute/test_specialization.py::test_v2v_update_all[idtype1] 0.95s call tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp0-g1] 0.94s call tests/compute/test_heterograph.py::test_types_in_function[idtype0] 0.93s call tests/compute/test_sampling.py::test_uniform_random_walk[False] 0.90s call tests/compute/test_heterograph.py::test_types_in_function[idtype1] 0.90s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-copy_lhs-shp4-g1] 0.90s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp2-g1] 0.89s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g0] 0.88s call tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g0] 0.88s call tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g1] 0.88s call tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g1] 0.87s call tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp0-g1] 0.86s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g1] 0.86s call tests/compute/test_basics.py::test_issue_2484[idtype1] 0.86s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp1-g0] 0.86s call tests/compute/test_sparse.py::test_spmm[idtype0-min-sub-shp4-g1] 0.86s call tests/compute/test_sparse.py::test_spmm[idtype1-sum-add-shp0-g1] 0.85s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp1-g1] 0.85s call tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp1-g0] 0.85s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g0] 0.84s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g1] 0.84s call tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp3-g0] 0.84s call tests/compute/test_basics.py::test_issue_2484[idtype0] 0.83s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp0-g1] 0.83s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp3-g1] 0.83s call tests/compute/test_sparse.py::test_spmm[idtype1-max-add-shp5-g0] 0.83s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-add-shp0-g1] 0.83s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-sub-shp3-g0] 0.82s call tests/compute/test_sparse.py::test_spmm[idtype1-max-sub-shp1-g1] 0.82s call 
tests/compute/test_sparse.py::test_spmm[idtype0-sum-mul-shp3-g1] 0.82s call tests/compute/test_sparse.py::test_spmm[idtype0-min-div-shp1-g1] 0.81s call tests/compute/test_sparse.py::test_spmm[idtype0-sum-div-shp3-g1] 0.81s call tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp3-g1] 0.81s call tests/compute/test_sparse.py::test_spmm[idtype0-max-copy_rhs-shp1-g0] 0.81s call tests/compute/test_sparse.py::test_spmm[idtype0-min-mul-shp4-g0] 0.80s call tests/compute/test_sparse.py::test_spmm[idtype0-max-sub-shp1-g0]
========= 2308 passed, 150 skipped, 236 warnings in 1246.70s (0:20:46) =========
============================= test session starts ==============================
platform linux -- Python 3.7.0, pytest-7.1.2, pluggy-1.0.0 -- /opt/conda/envs/tensorflow-ci/bin/python3
cachedir: .pytest_cache
rootdir: /root/jenkins/workspace/dgl_PR-4648@2
collecting ...
PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[stack-int32]
collected 920 items
tests/tensorflow/test_basic.py::test PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv[1] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv[2] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g0-idtype0] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g0-idtype1] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g1-idtype0] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g1-idtype1] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g2-idtype0] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g2-idtype1] PASSED [ 0%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g3-idtype0] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g3-idtype1] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g4-idtype0] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g4-idtype1] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g5-idtype0] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g5-idtype1] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g6-idtype0] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g6-idtype1] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g0-idtype0] PASSED [ 1%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g0-idtype1] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g1-idtype0] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g1-idtype1] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g2-idtype0] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g2-idtype1] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g3-idtype0] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[stack-int64] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g3-idtype1] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g4-idtype0] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g4-idtype1] PASSED [ 2%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g5-idtype0] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g5-idtype1] PASSED [ 3%]
tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g6-idtype0] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g6-idtype1] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g0-idtype0] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g0-idtype1] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g1-idtype0] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g1-idtype1] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g2-idtype0] PASSED [ 3%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g2-idtype1] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g3-idtype0] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g3-idtype1] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g4-idtype0] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g4-idtype1] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g5-idtype0] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g5-idtype1] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g6-idtype0] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-right-g6-idtype1] PASSED [ 4%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g0-idtype0] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g0-idtype1] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g1-idtype0] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g1-idtype1] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g2-idtype0] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g2-idtype1] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g3-idtype0] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g3-idtype1] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g4-idtype0] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g4-idtype1] PASSED [ 5%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g5-idtype0] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g5-idtype1] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g6-idtype0] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-left-g6-idtype1] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g0-idtype0] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g0-idtype1] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g1-idtype0] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g1-idtype1] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g2-idtype0] PASSED [ 6%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g2-idtype1] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g3-idtype0] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g3-idtype1] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g4-idtype0] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g4-idtype1] PASSED [ 7%] 
tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g5-idtype0] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g5-idtype1] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g6-idtype0] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-none-g6-idtype1] PASSED [ 7%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g0-idtype0] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g0-idtype1] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g1-idtype0] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g1-idtype1] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g2-idtype0] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g2-idtype1] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g3-idtype0] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g3-idtype1] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g4-idtype0] PASSED [ 8%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g4-idtype1] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g5-idtype0] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g5-idtype1] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[myagg-int32] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g6-idtype0] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-both-g6-idtype1] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g0-idtype0] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g0-idtype1] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g1-idtype0] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g1-idtype1] PASSED [ 9%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g2-idtype0] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g2-idtype1] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g3-idtype0] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g3-idtype1] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g4-idtype0] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g4-idtype1] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g5-idtype0] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g5-idtype1] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g6-idtype0] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-right-g6-idtype1] PASSED [ 10%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g0-idtype0] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g0-idtype1] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g1-idtype0] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g1-idtype1] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g2-idtype0] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g2-idtype1] PASSED [ 11%] 
tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g3-idtype0] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g3-idtype1] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g4-idtype0] PASSED [ 11%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g4-idtype1] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g5-idtype0] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g5-idtype1] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g6-idtype0] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-True-False-left-g6-idtype1] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g0-idtype0] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g0-idtype1] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g1-idtype0] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g1-idtype1] PASSED [ 12%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g2-idtype0] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g2-idtype1] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g3-idtype0] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g3-idtype1] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g4-idtype0] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g4-idtype1] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g5-idtype0] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g5-idtype1] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g6-idtype0] PASSED [ 13%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-none-g6-idtype1] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g0-idtype0] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g0-idtype1] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g1-idtype0] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g1-idtype1] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g2-idtype0] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g2-idtype1] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g3-idtype0] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g3-idtype1] PASSED [ 14%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g4-idtype0] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g4-idtype1] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g5-idtype0] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g5-idtype1] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g6-idtype0] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-both-g6-idtype1] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g0-idtype0] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g0-idtype1] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g1-idtype0] PASSED [ 15%] 
tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g1-idtype1] PASSED [ 15%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g2-idtype0] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g2-idtype1] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g3-idtype0] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g3-idtype1] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g4-idtype0] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g4-idtype1] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g5-idtype0] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g5-idtype1] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g6-idtype0] PASSED [ 16%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-right-g6-idtype1] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g0-idtype0] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g0-idtype1] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g1-idtype0] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g1-idtype1] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g2-idtype0] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g2-idtype1] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g3-idtype0] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g3-idtype1] PASSED [ 17%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g4-idtype0] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g4-idtype1] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g5-idtype0] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g5-idtype1] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g6-idtype0] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-True-left-g6-idtype1] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g0-idtype0] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g0-idtype1] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g1-idtype0] PASSED [ 18%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g1-idtype1] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g2-idtype0] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g2-idtype1] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g3-idtype0] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g3-idtype1] PASSED [ 99%] tests/mxnet/test_nn.py::test_hetero_conv[myagg-int64] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g4-idtype0] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g4-idtype1] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g5-idtype0] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g5-idtype1] PASSED [ 19%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g6-idtype0] PASSED [ 20%] 
tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-none-g6-idtype1] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g0-idtype0] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g0-idtype1] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g1-idtype0] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g1-idtype1] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g2-idtype0] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g2-idtype1] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g3-idtype0] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g3-idtype1] PASSED [ 20%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g4-idtype0] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g4-idtype1] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g5-idtype0] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g5-idtype1] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g6-idtype0] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-both-g6-idtype1] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g0-idtype0] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g0-idtype1] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g1-idtype0] PASSED [ 21%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g1-idtype1] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g2-idtype0] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g2-idtype1] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g3-idtype0] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g3-idtype1] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g4-idtype0] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g4-idtype1] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g5-idtype0] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g5-idtype1] PASSED [ 22%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g6-idtype0] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-right-g6-idtype1] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g0-idtype0] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g0-idtype1] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g1-idtype0] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g1-idtype1] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g2-idtype0] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g2-idtype1] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g3-idtype0] PASSED [ 23%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g3-idtype1] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g4-idtype0] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g4-idtype1] 
PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g5-idtype0] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g5-idtype1] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g6-idtype0] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[1-False-False-left-g6-idtype1] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g0-idtype0] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g0-idtype1] PASSED [ 24%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g1-idtype0] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g1-idtype1] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g2-idtype0] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g2-idtype1] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g3-idtype0] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g3-idtype1] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g4-idtype0] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g4-idtype1] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g5-idtype0] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g5-idtype1] PASSED [ 25%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g6-idtype0] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-none-g6-idtype1] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g0-idtype0] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g0-idtype1] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g1-idtype0] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g1-idtype1] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g2-idtype0] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g2-idtype1] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g3-idtype0] PASSED [ 26%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g3-idtype1] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g4-idtype0] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g4-idtype1] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g5-idtype0] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g5-idtype1] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g6-idtype0] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-both-g6-idtype1] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g0-idtype0] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g0-idtype1] PASSED [ 27%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g1-idtype0] PASSED [100%]
=============================== warnings summary ===============================
tests/mxnet/test_nn.py::test_cheb_conv[1]
tests/mxnet/test_nn.py::test_cheb_conv[20]
  /root/jenkins/workspace/dgl_PR-4648@4/python/dgl/nn/mxnet/conv/chebconv.py:121: DGLWarning: lambda_max is not provided, using default value of 2. Please use dgl.laplacian_lambda_max to compute the eigenvalues.
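The ChebConv warning spells out its own fix: precompute the largest Laplacian eigenvalue instead of letting the layer assume 2. A minimal sketch on a toy graph (the layer sizes are illustrative, not taken from the test):

    import dgl
    import torch

    g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
    lambda_max = dgl.laplacian_lambda_max(g)   # one value per graph in a batch

    conv = dgl.nn.ChebConv(in_feats=5, out_feats=4, k=2)
    h = conv(g, torch.randn(3, 5), lambda_max=lambda_max)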
"lambda_max is not provided, using default value of 2. " -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html - generated xml file: /root/jenkins/workspace/dgl_PR-4648@4/pytest_backend.xml - ============================ slowest 100 durations ============================= 7.56s call tests/mxnet/test_nn.py::test_hetero_conv[stack-int32] 7.14s call tests/mxnet/test_nn.py::test_hetero_conv[min-int32] 6.67s call tests/mxnet/test_nn.py::test_hetero_conv[mean-int32] 6.57s call tests/mxnet/test_nn.py::test_hetero_conv[min-int64] 6.41s call tests/mxnet/test_nn.py::test_hetero_conv[stack-int64] 6.07s call tests/mxnet/test_nn.py::test_hetero_conv[max-int64] 5.94s call tests/mxnet/test_nn.py::test_hetero_conv[mean-int64] 5.66s call tests/mxnet/test_nn.py::test_hetero_conv[myagg-int32] 5.35s call tests/mxnet/test_nn.py::test_hetero_conv[myagg-int64] 3.10s call tests/mxnet/test_nn.py::test_hetero_conv[sum-int64] 2.41s call tests/mxnet/test_nn.py::test_hetero_conv[max-int32] 2.03s call tests/mxnet/test_nn.py::test_hetero_conv[sum-int32] 1.57s call tests/mxnet/test_nn.py::test_gg_conv 1.50s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g6-int64] 1.47s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g5-int64] 1.43s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g6-int32] 1.39s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g3-int64] 1.36s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g3-int32] 1.33s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g4-int64] 1.32s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g4-int32] 1.27s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g2-int64] 1.26s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g1-int32] 1.24s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g1-int32] 1.22s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g1-int64] 1.22s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g0-int64] 1.20s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g5-int32] 1.20s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g0-int32] 1.20s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g2-int32] 1.19s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g1-int64] 1.13s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g2-int32] 1.13s call tests/mxnet/test_nn.py::test_gat_conv[1-1-g0-int32] 1.12s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g3-int64] 1.10s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g0-int64] 1.06s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g4-int32] 1.04s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g5-int32] 1.04s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g4-int64] 1.03s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g3-int32] 0.99s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g2-int64] 0.86s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g6-int32] 0.83s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g5-int64] 0.83s call tests/mxnet/test_nn.py::test_dense_cheb_conv[2] 0.83s call tests/mxnet/test_nn.py::test_dense_cheb_conv[1] 0.70s call tests/mxnet/test_nn.py::test_tagconv[2] 0.68s call tests/mxnet/test_nn.py::test_gat_conv[1-20-g6-int64] 0.62s call tests/mxnet/test_nn.py::test_tagconv[1] 0.59s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g0-int32] 0.54s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g0-int64] 0.54s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g3-int64] 0.53s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g4-int32] 0.53s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g0-int64] 0.53s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g2-int32] 0.53s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g5-int64] 0.53s 
call tests/mxnet/test_nn.py::test_gat_conv[5-1-g3-int32] 0.53s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g2-int64] 0.52s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g6-int32] 0.52s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g1-int32] 0.51s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g6-int64] 0.51s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g1-int64] 0.51s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g4-int64] 0.50s call tests/mxnet/test_nn.py::test_gat_conv[5-1-g5-int32] 0.50s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g4-int64] 0.50s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g5-int64] 0.49s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g2-int32] 0.49s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g1-int32] 0.49s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g0-int32] 0.49s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g1-int64] 0.49s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g5-int32] 0.49s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g3-int64] 0.49s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g6-int64] 0.48s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g2-int64] 0.48s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g4-int32] 0.48s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g6-int32] 0.48s call tests/mxnet/test_nn.py::test_gat_conv[5-20-g3-int32] 0.40s call tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g0-int64] 0.39s call tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g0-int32] 0.38s call tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g1-int32] 0.38s call tests/mxnet/test_nn.py::test_gat_conv_bi[1-1-g1-int64] 0.37s call tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-both-g0-int32] 0.37s call tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g1-int64] 0.35s call tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g0-int64] 0.34s call tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g0-int32] 0.34s call tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g1-int32] 0.33s call tests/mxnet/test_nn.py::test_gat_conv_bi[4-1-g1-int32] 0.33s call tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-both-int32] 0.33s call tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-both-int32] 0.33s call tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-both-int32] 0.32s call tests/mxnet/test_nn.py::test_dense_graph_conv[1-g5-both-int32] 0.32s call tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g0-int64] 0.32s call tests/mxnet/test_nn.py::test_dense_graph_conv[1-g1-both-int64] 0.32s call tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g0-int32] 0.32s call tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g1-int64] 0.32s call tests/mxnet/test_nn.py::test_dense_graph_conv[1-g0-both-int64] 0.32s call tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g0-int32] 0.32s call tests/mxnet/test_nn.py::test_dense_graph_conv[1-g3-both-int32] 0.32s call tests/mxnet/test_nn.py::test_gat_conv_bi[4-2-g1-int64] 0.32s call tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g1-int32] 0.31s call tests/mxnet/test_nn.py::test_graph_conv2_bi[1-False-True-both-g0-int64] 0.31s call tests/mxnet/test_nn.py::test_dense_graph_conv[1-g2-both-int64] 0.31s call tests/mxnet/test_nn.py::test_dense_graph_conv[1-g4-both-int32] 0.31s call tests/mxnet/test_nn.py::test_gat_conv_bi[1-2-g0-int64]
================= 806 passed, 2 warnings in 177.66s (0:02:57) ==================
[Pipeline] }
[Pipeline] // timeout
PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g1-idtype1] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g2-idtype0] PASSED [ 28%]
tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g2-idtype1] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g3-idtype0] [Pipeline] } [Pipeline] // stage Post stage [Pipeline] cleanWs [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is disabled by the job configuration... PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g3-idtype1] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g4-idtype0] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g4-idtype1] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g5-idtype0] PASSED [ 28%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g5-idtype1] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g6-idtype0] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-right-g6-idtype1] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g0-idtype0] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g0-idtype1] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g1-idtype0] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g1-idtype1] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g2-idtype0] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g2-idtype1] PASSED [ 29%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g3-idtype0] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g3-idtype1] [WS-CLEANUP] done [Pipeline] } $ docker stop --time=1 6db97419e6b6b2f1427e0e55271c5f8554e9cd989f9b44da6a3861a1be7f5a89 PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g4-idtype0] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g4-idtype1] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g5-idtype0] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g5-idtype1] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g6-idtype0] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-True-left-g6-idtype1] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g0-idtype0] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g0-idtype1] PASSED [ 30%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g1-idtype0] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g1-idtype1] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g2-idtype0] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g2-idtype1] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g3-idtype0] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g3-idtype1] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g4-idtype0] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g4-idtype1] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g5-idtype0] PASSED [ 31%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g5-idtype1] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g6-idtype0] PASSED [ 32%] 
tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-none-g6-idtype1] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g0-idtype0] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g0-idtype1] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g1-idtype0] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g1-idtype1] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g2-idtype0] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g2-idtype1] PASSED [ 32%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g3-idtype0] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g3-idtype1] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g4-idtype0] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g4-idtype1] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g5-idtype0] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g5-idtype1] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g6-idtype0] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-both-g6-idtype1] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g0-idtype0] PASSED [ 33%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g0-idtype1] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g1-idtype0] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g1-idtype1] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g2-idtype0] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g2-idtype1] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g3-idtype0] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g3-idtype1] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g4-idtype0] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g4-idtype1] PASSED [ 34%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g5-idtype0] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g5-idtype1] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g6-idtype0] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-right-g6-idtype1] $ docker rm -f 6db97419e6b6b2f1427e0e55271c5f8554e9cd989f9b44da6a3861a1be7f5a89 PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g0-idtype0] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g0-idtype1] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g1-idtype0] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g1-idtype1] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g2-idtype0] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g2-idtype1] PASSED [ 35%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g3-idtype0] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g3-idtype1] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g4-idtype0] PASSED [ 36%] 
tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g4-idtype1] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g5-idtype0] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g5-idtype1] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g6-idtype0] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-True-False-left-g6-idtype1] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g0-idtype0] PASSED [ 36%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g0-idtype1] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g1-idtype0] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g1-idtype1] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g2-idtype0] [Pipeline] // withDockerContainer [Pipeline] } [Pipeline] // withEnv [Pipeline] } [Pipeline] // node [Pipeline] } PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g2-idtype1] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g3-idtype0] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g3-idtype1] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g4-idtype0] PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g4-idtype1] [Pipeline] // stage [Pipeline] } PASSED [ 37%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g5-idtype0] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g5-idtype1] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g6-idtype0] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-none-g6-idtype1] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g0-idtype0] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g0-idtype1] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g1-idtype0] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g1-idtype1] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g2-idtype0] PASSED [ 38%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g2-idtype1] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g3-idtype0] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g3-idtype1] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g4-idtype0] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g4-idtype1] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g5-idtype0] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g5-idtype1] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g6-idtype0] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-both-g6-idtype1] PASSED [ 39%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g0-idtype0] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g0-idtype1] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g1-idtype0] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g1-idtype1] PASSED [ 40%] 
tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g2-idtype0] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g2-idtype1] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g3-idtype0] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g3-idtype1] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g4-idtype0] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g4-idtype1] PASSED [ 40%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g5-idtype0] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g5-idtype1] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g6-idtype0] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-right-g6-idtype1] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g0-idtype0] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g0-idtype1] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g1-idtype0] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g1-idtype1] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g2-idtype0] PASSED [ 41%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g2-idtype1] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g3-idtype0] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g3-idtype1] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g4-idtype0] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g4-idtype1] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g5-idtype0] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g5-idtype1] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g6-idtype0] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-True-left-g6-idtype1] PASSED [ 42%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g0-idtype0] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g0-idtype1] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g1-idtype0] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g1-idtype1] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g2-idtype0] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g2-idtype1] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g3-idtype0] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g3-idtype1] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g4-idtype0] PASSED [ 43%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g4-idtype1] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g5-idtype0] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g5-idtype1] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g6-idtype0] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-none-g6-idtype1] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g0-idtype0] PASSED [ 44%] 
tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g0-idtype1] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g1-idtype0] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g1-idtype1] PASSED [ 44%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g2-idtype0] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g2-idtype1] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g3-idtype0] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g3-idtype1] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g4-idtype0] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g4-idtype1] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g5-idtype0] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g5-idtype1] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g6-idtype0] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-both-g6-idtype1] PASSED [ 45%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g0-idtype0] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g0-idtype1] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g1-idtype0] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g1-idtype1] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g2-idtype0] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g2-idtype1] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g3-idtype0] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g3-idtype1] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g4-idtype0] PASSED [ 46%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g4-idtype1] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g5-idtype0] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g5-idtype1] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g6-idtype0] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-right-g6-idtype1] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g0-idtype0] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g0-idtype1] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g1-idtype0] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g1-idtype1] PASSED [ 47%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g2-idtype0] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g2-idtype1] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g3-idtype0] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g3-idtype1] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g4-idtype0] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g4-idtype1] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g5-idtype0] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g5-idtype1] 
PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g6-idtype0] PASSED [ 48%] tests/tensorflow/test_nn.py::test_graph_conv2[2-False-False-left-g6-idtype1] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-none-g0-idtype0] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-none-g0-idtype1] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-none-g1-idtype0] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-none-g1-idtype1] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-both-g0-idtype0] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-both-g0-idtype1] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-both-g1-idtype0] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-both-g1-idtype1] PASSED [ 49%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-right-g0-idtype0] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-right-g0-idtype1] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-right-g1-idtype0] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-True-right-g1-idtype1] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-none-g0-idtype0] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-none-g0-idtype1] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-none-g1-idtype0] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-none-g1-idtype1] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-both-g0-idtype0] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-both-g0-idtype1] PASSED [ 50%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-both-g1-idtype0] PASSED [ 51%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-both-g1-idtype1] PASSED [ 51%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-right-g0-idtype0] PASSED [ 51%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-right-g0-idtype1] PASSED [ 51%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-right-g1-idtype0] PASSED [ 51%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-True-False-right-g1-idtype1] PASSED [ 51%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-none-g0-idtype0] PASSED [ 51%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-none-g0-idtype1] PASSED [ 51%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-none-g1-idtype0] PASSED [ 51%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-none-g1-idtype1] PASSED [ 52%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-both-g0-idtype0] PASSED [ 52%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-both-g0-idtype1] PASSED [ 52%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-both-g1-idtype0] PASSED [ 52%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-both-g1-idtype1] PASSED [ 52%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-right-g0-idtype0] PASSED [ 52%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-right-g0-idtype1] PASSED [ 52%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-right-g1-idtype0] PASSED [ 52%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-True-right-g1-idtype1] PASSED [ 52%] 
tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-none-g0-idtype0] PASSED [ 53%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-none-g0-idtype1] PASSED [ 53%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-none-g1-idtype0] PASSED [ 53%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-none-g1-idtype1] PASSED [ 53%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-both-g0-idtype0] PASSED [ 53%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-both-g0-idtype1] PASSED [ 53%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-both-g1-idtype0] PASSED [ 53%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-both-g1-idtype1] PASSED [ 53%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-right-g0-idtype0] PASSED [ 53%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-right-g0-idtype1] PASSED [ 54%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-right-g1-idtype0] PASSED [ 54%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[1-False-False-right-g1-idtype1] PASSED [ 54%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-none-g0-idtype0] PASSED [ 54%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-none-g0-idtype1] PASSED [ 54%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-none-g1-idtype0] PASSED [ 54%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-none-g1-idtype1] PASSED [ 54%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-both-g0-idtype0] PASSED [ 54%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-both-g0-idtype1] PASSED [ 54%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-both-g1-idtype0] PASSED [ 55%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-both-g1-idtype1] PASSED [ 55%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-right-g0-idtype0] PASSED [ 55%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-right-g0-idtype1] PASSED [ 55%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-right-g1-idtype0] PASSED [ 55%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-True-right-g1-idtype1] PASSED [ 55%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-none-g0-idtype0] PASSED [ 55%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-none-g0-idtype1] PASSED [ 55%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-none-g1-idtype0] PASSED [ 55%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-none-g1-idtype1] PASSED [ 55%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-both-g0-idtype0] PASSED [ 56%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-both-g0-idtype1] PASSED [ 56%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-both-g1-idtype0] PASSED [ 56%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-both-g1-idtype1] PASSED [ 56%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-right-g0-idtype0] PASSED [ 56%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-right-g0-idtype1] PASSED [ 56%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-right-g1-idtype0] PASSED [ 56%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-True-False-right-g1-idtype1] PASSED [ 56%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-none-g0-idtype0] PASSED [ 56%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-none-g0-idtype1] PASSED [ 57%] 
tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-none-g1-idtype0] PASSED [ 57%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-none-g1-idtype1] PASSED [ 57%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-both-g0-idtype0] PASSED [ 57%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-both-g0-idtype1] PASSED [ 57%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-both-g1-idtype0] PASSED [ 57%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-both-g1-idtype1] PASSED [ 57%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-right-g0-idtype0] PASSED [ 57%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-right-g0-idtype1] PASSED [ 57%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-right-g1-idtype0] PASSED [ 58%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-True-right-g1-idtype1] PASSED [ 58%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-none-g0-idtype0] PASSED [ 58%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-none-g0-idtype1] PASSED [ 58%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-none-g1-idtype0] PASSED [ 58%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-none-g1-idtype1] PASSED [ 58%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-both-g0-idtype0] PASSED [ 58%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-both-g0-idtype1] PASSED [ 58%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-both-g1-idtype0] PASSED [ 58%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-both-g1-idtype1] PASSED [ 59%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-right-g0-idtype0] PASSED [ 59%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-right-g0-idtype1] PASSED [ 59%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-right-g1-idtype0] PASSED [ 59%] tests/tensorflow/test_nn.py::test_graph_conv2_bi[2-False-False-right-g1-idtype1] PASSED [ 59%] tests/tensorflow/test_nn.py::test_simple_pool PASSED [ 59%] tests/tensorflow/test_nn.py::test_glob_att_pool PASSED [ 59%] tests/tensorflow/test_nn.py::test_rgcn[1] PASSED [ 59%] tests/tensorflow/test_nn.py::test_rgcn[2] PASSED [ 59%] tests/tensorflow/test_nn.py::test_rgcn[8] PASSED [ 60%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g0-idtype0] PASSED [ 60%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g0-idtype1] PASSED [ 60%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g1-idtype0] PASSED [ 60%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g1-idtype1] PASSED [ 60%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g2-idtype0] PASSED [ 60%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g2-idtype1] PASSED [ 60%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g3-idtype0] PASSED [ 60%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g3-idtype1] PASSED [ 60%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g4-idtype0] PASSED [ 60%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g4-idtype1] PASSED [ 61%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g5-idtype0] PASSED [ 61%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g5-idtype1] PASSED [ 61%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g6-idtype0] PASSED [ 61%] tests/tensorflow/test_nn.py::test_gat_conv[1-1-g6-idtype1] PASSED [ 61%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g0-idtype0] PASSED [ 61%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g0-idtype1] PASSED [ 61%] 
tests/tensorflow/test_nn.py::test_gat_conv[1-2-g1-idtype0] PASSED [ 61%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g1-idtype1] PASSED [ 61%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g2-idtype0] PASSED [ 62%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g2-idtype1] PASSED [ 62%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g3-idtype0] PASSED [ 62%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g3-idtype1] PASSED [ 62%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g4-idtype0] PASSED [ 62%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g4-idtype1] PASSED [ 62%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g5-idtype0] PASSED [ 62%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g5-idtype1] PASSED [ 62%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g6-idtype0] PASSED [ 62%] tests/tensorflow/test_nn.py::test_gat_conv[1-2-g6-idtype1] PASSED [ 63%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g0-idtype0] PASSED [ 63%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g0-idtype1] PASSED [ 63%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g1-idtype0] PASSED [ 63%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g1-idtype1] PASSED [ 63%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g2-idtype0] PASSED [ 63%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g2-idtype1] PASSED [ 63%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g3-idtype0] PASSED [ 63%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g3-idtype1] PASSED [ 63%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g4-idtype0] PASSED [ 64%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g4-idtype1] PASSED [ 64%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g5-idtype0] PASSED [ 64%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g5-idtype1] PASSED [ 64%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g6-idtype0] PASSED [ 64%] tests/tensorflow/test_nn.py::test_gat_conv[4-1-g6-idtype1] PASSED [ 64%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g0-idtype0] PASSED [ 64%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g0-idtype1] PASSED [ 64%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g1-idtype0] PASSED [ 64%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g1-idtype1] PASSED [ 65%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g2-idtype0] PASSED [ 65%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g2-idtype1] PASSED [ 65%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g3-idtype0] PASSED [ 65%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g3-idtype1] PASSED [ 65%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g4-idtype0] PASSED [ 65%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g4-idtype1] PASSED [ 65%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g5-idtype0] PASSED [ 65%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g5-idtype1] PASSED [ 65%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g6-idtype0] PASSED [ 65%] tests/tensorflow/test_nn.py::test_gat_conv[4-2-g6-idtype1] PASSED [ 66%] tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g0-idtype0] PASSED [ 66%] tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g0-idtype1] PASSED [ 66%] tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g1-idtype0] PASSED [ 66%] tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g1-idtype1] PASSED [ 66%] tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g0-idtype0] PASSED [ 66%] tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g0-idtype1] PASSED [ 66%] tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g1-idtype0] PASSED [ 66%] tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g1-idtype1] PASSED [ 66%] 
tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g0-idtype0] PASSED [ 67%] tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g0-idtype1] PASSED [ 67%] tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g1-idtype0] PASSED [ 67%] tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g1-idtype1] PASSED [ 67%] tests/tensorflow/test_nn.py::test_gat_conv_bi[4-2-g0-idtype0] PASSED [ 67%] tests/tensorflow/test_nn.py::test_gat_conv_bi[4-2-g0-idtype1] PASSED [ 67%] tests/tensorflow/test_nn.py::test_gat_conv_bi[4-2-g1-idtype0] PASSED [ 67%] tests/tensorflow/test_nn.py::test_gat_conv_bi[4-2-g1-idtype1] PASSED [ 67%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g0-idtype0] PASSED [ 67%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g0-idtype1] PASSED [ 68%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g1-idtype0] PASSED [ 68%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g1-idtype1] PASSED [ 68%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g2-idtype0] PASSED [ 68%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g2-idtype1] PASSED [ 68%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g3-idtype0] PASSED [ 68%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g3-idtype1] PASSED [ 68%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g4-idtype0] PASSED [ 68%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g4-idtype1] PASSED [ 68%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g5-idtype0] PASSED [ 69%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g5-idtype1] PASSED [ 69%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g6-idtype0] PASSED [ 69%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g6-idtype1] PASSED [ 69%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g7-idtype0] PASSED [ 69%] tests/tensorflow/test_nn.py::test_sage_conv[1-mean-g7-idtype1] PASSED [ 69%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g0-idtype0] PASSED [ 69%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g0-idtype1] PASSED [ 69%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g1-idtype0] PASSED [ 69%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g1-idtype1] PASSED [ 70%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g2-idtype0] PASSED [ 70%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g2-idtype1] PASSED [ 70%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g3-idtype0] PASSED [ 70%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g3-idtype1] PASSED [ 70%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g4-idtype0] PASSED [ 70%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g4-idtype1] PASSED [ 70%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g5-idtype0] PASSED [ 70%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g5-idtype1] PASSED [ 70%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g6-idtype0] PASSED [ 70%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g6-idtype1] PASSED [ 71%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g7-idtype0] PASSED [ 71%] tests/tensorflow/test_nn.py::test_sage_conv[1-pool-g7-idtype1] PASSED [ 71%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g0-idtype0] PASSED [ 71%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g0-idtype1] PASSED [ 71%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g1-idtype0] PASSED [ 71%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g1-idtype1] PASSED [ 71%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g2-idtype0] PASSED [ 71%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g2-idtype1] PASSED [ 71%] 
tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g3-idtype0] PASSED [ 72%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g3-idtype1] PASSED [ 72%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g4-idtype0] PASSED [ 72%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g4-idtype1] PASSED [ 72%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g5-idtype0] PASSED [ 72%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g5-idtype1] PASSED [ 72%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g6-idtype0] PASSED [ 72%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g6-idtype1] PASSED [ 72%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g7-idtype0] PASSED [ 72%] tests/tensorflow/test_nn.py::test_sage_conv[1-gcn-g7-idtype1] PASSED [ 73%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g0-idtype0] PASSED [ 73%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g0-idtype1] PASSED [ 73%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g1-idtype0] PASSED [ 73%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g1-idtype1] PASSED [ 73%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g2-idtype0] PASSED [ 73%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g2-idtype1] PASSED [ 73%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g3-idtype0] PASSED [ 73%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g3-idtype1] PASSED [ 73%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g4-idtype0] PASSED [ 74%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g4-idtype1] PASSED [ 74%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g5-idtype0] PASSED [ 74%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g5-idtype1] PASSED [ 74%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g6-idtype0] PASSED [ 74%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g6-idtype1] PASSED [ 74%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g7-idtype0] PASSED [ 74%] tests/tensorflow/test_nn.py::test_sage_conv[10-mean-g7-idtype1] PASSED [ 74%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g0-idtype0] PASSED [ 74%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g0-idtype1] PASSED [ 75%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g1-idtype0] PASSED [ 75%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g1-idtype1] PASSED [ 75%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g2-idtype0] PASSED [ 75%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g2-idtype1] PASSED [ 75%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g3-idtype0] PASSED [ 75%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g3-idtype1] PASSED [ 75%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g4-idtype0] PASSED [ 75%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g4-idtype1] PASSED [ 75%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g5-idtype0] PASSED [ 75%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g5-idtype1] PASSED [ 76%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g6-idtype0] PASSED [ 76%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g6-idtype1] PASSED [ 76%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g7-idtype0] PASSED [ 76%] tests/tensorflow/test_nn.py::test_sage_conv[10-pool-g7-idtype1] PASSED [ 76%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g0-idtype0] PASSED [ 76%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g0-idtype1] PASSED [ 76%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g1-idtype0] PASSED [ 76%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g1-idtype1] 
PASSED [ 76%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g2-idtype0] PASSED [ 77%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g2-idtype1] PASSED [ 77%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g3-idtype0] PASSED [ 77%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g3-idtype1] PASSED [ 77%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g4-idtype0] PASSED [ 77%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g4-idtype1] PASSED [ 77%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g5-idtype0] PASSED [ 77%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g5-idtype1] PASSED [ 77%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g6-idtype0] PASSED [ 77%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g6-idtype1] PASSED [ 78%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g7-idtype0] PASSED [ 78%] tests/tensorflow/test_nn.py::test_sage_conv[10-gcn-g7-idtype1] PASSED [ 78%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g0-idtype0] PASSED [ 78%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g0-idtype1] PASSED [ 78%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g1-idtype0] PASSED [ 78%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g1-idtype1] PASSED [ 78%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g2-idtype0] PASSED [ 78%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-mean-g2-idtype1] PASSED [ 78%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g0-idtype0] PASSED [ 79%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g0-idtype1] PASSED [ 79%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g1-idtype0] PASSED [ 79%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g1-idtype1] PASSED [ 79%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g2-idtype0] PASSED [ 79%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-pool-g2-idtype1] PASSED [ 79%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g0-idtype0] PASSED [ 79%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g0-idtype1] PASSED [ 79%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g1-idtype0] PASSED [ 79%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g1-idtype1] PASSED [ 80%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g2-idtype0] PASSED [ 80%] tests/tensorflow/test_nn.py::test_sage_conv_bi[1-gcn-g2-idtype1] PASSED [ 80%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g0-idtype0] PASSED [ 80%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g0-idtype1] PASSED [ 80%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g1-idtype0] PASSED [ 80%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g1-idtype1] PASSED [ 80%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g2-idtype0] PASSED [ 80%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-mean-g2-idtype1] PASSED [ 80%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g0-idtype0] PASSED [ 80%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g0-idtype1] PASSED [ 81%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g1-idtype0] PASSED [ 81%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g1-idtype1] PASSED [ 81%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g2-idtype0] PASSED [ 81%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-pool-g2-idtype1] PASSED [ 81%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g0-idtype0] PASSED [ 81%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g0-idtype1] PASSED [ 81%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g1-idtype0] 
PASSED [ 81%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g1-idtype1] PASSED [ 81%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g2-idtype0] PASSED [ 82%] tests/tensorflow/test_nn.py::test_sage_conv_bi[2-gcn-g2-idtype1] PASSED [ 82%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-mean-idtype0] PASSED [ 82%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-mean-idtype1] PASSED [ 82%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-pool-idtype0] PASSED [ 82%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-pool-idtype1] PASSED [ 82%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-gcn-idtype0] PASSED [ 82%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[1-gcn-idtype1] PASSED [ 82%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-mean-idtype0] PASSED [ 82%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-mean-idtype1] PASSED [ 83%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-pool-idtype0] PASSED [ 83%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-pool-idtype1] PASSED [ 83%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-gcn-idtype0] PASSED [ 83%] tests/tensorflow/test_nn.py::test_sage_conv_bi_empty[2-gcn-idtype1] PASSED [ 83%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g0-idtype0] PASSED [ 83%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g0-idtype1] PASSED [ 83%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g1-idtype0] PASSED [ 83%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g1-idtype1] PASSED [ 83%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g2-idtype0] PASSED [ 84%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g2-idtype1] PASSED [ 84%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g3-idtype0] PASSED [ 84%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g3-idtype1] PASSED [ 84%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g4-idtype0] PASSED [ 84%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g4-idtype1] PASSED [ 84%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g5-idtype0] PASSED [ 84%] tests/tensorflow/test_nn.py::test_sgc_conv[1-g5-idtype1] PASSED [ 84%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g0-idtype0] PASSED [ 84%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g0-idtype1] PASSED [ 85%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g1-idtype0] PASSED [ 85%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g1-idtype1] PASSED [ 85%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g2-idtype0] PASSED [ 85%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g2-idtype1] PASSED [ 85%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g3-idtype0] PASSED [ 85%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g3-idtype1] PASSED [ 85%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g4-idtype0] PASSED [ 85%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g4-idtype1] PASSED [ 85%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g5-idtype0] PASSED [ 85%] tests/tensorflow/test_nn.py::test_sgc_conv[2-g5-idtype1] PASSED [ 86%] tests/tensorflow/test_nn.py::test_appnp_conv[g0-idtype0] PASSED [ 86%] tests/tensorflow/test_nn.py::test_appnp_conv[g0-idtype1] PASSED [ 86%] tests/tensorflow/test_nn.py::test_appnp_conv[g1-idtype0] PASSED [ 86%] tests/tensorflow/test_nn.py::test_appnp_conv[g1-idtype1] PASSED [ 86%] tests/tensorflow/test_nn.py::test_appnp_conv[g2-idtype0] PASSED [ 86%] tests/tensorflow/test_nn.py::test_appnp_conv[g2-idtype1] PASSED [ 86%] tests/tensorflow/test_nn.py::test_appnp_conv[g3-idtype0] PASSED [ 86%] tests/tensorflow/test_nn.py::test_appnp_conv[g3-idtype1] PASSED [ 86%] 
tests/tensorflow/test_nn.py::test_appnp_conv[g4-idtype0] PASSED [ 87%] tests/tensorflow/test_nn.py::test_appnp_conv[g4-idtype1] PASSED [ 87%] tests/tensorflow/test_nn.py::test_appnp_conv[g5-idtype0] PASSED [ 87%] tests/tensorflow/test_nn.py::test_appnp_conv[g5-idtype1] PASSED [ 87%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g0-idtype0] PASSED [ 87%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g0-idtype1] PASSED [ 87%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g1-idtype0] PASSED [ 87%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g1-idtype1] PASSED [ 87%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g2-idtype0] PASSED [ 87%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g2-idtype1] PASSED [ 88%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g3-idtype0] PASSED [ 88%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g3-idtype1] PASSED [ 88%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g4-idtype0] PASSED [ 88%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g4-idtype1] PASSED [ 88%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g5-idtype0] PASSED [ 88%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g5-idtype1] PASSED [ 88%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g6-idtype0] PASSED [ 88%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g6-idtype1] PASSED [ 88%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g7-idtype0] PASSED [ 89%] tests/tensorflow/test_nn.py::test_gin_conv[mean-g7-idtype1] PASSED [ 89%] tests/tensorflow/test_nn.py::test_gin_conv[max-g0-idtype0] PASSED [ 89%] tests/tensorflow/test_nn.py::test_gin_conv[max-g0-idtype1] PASSED [ 89%] tests/tensorflow/test_nn.py::test_gin_conv[max-g1-idtype0] PASSED [ 89%] tests/tensorflow/test_nn.py::test_gin_conv[max-g1-idtype1] PASSED [ 89%] tests/tensorflow/test_nn.py::test_gin_conv[max-g2-idtype0] PASSED [ 89%] tests/tensorflow/test_nn.py::test_gin_conv[max-g2-idtype1] PASSED [ 89%] tests/tensorflow/test_nn.py::test_gin_conv[max-g3-idtype0] PASSED [ 89%] tests/tensorflow/test_nn.py::test_gin_conv[max-g3-idtype1] PASSED [ 90%] tests/tensorflow/test_nn.py::test_gin_conv[max-g4-idtype0] PASSED [ 90%] tests/tensorflow/test_nn.py::test_gin_conv[max-g4-idtype1] PASSED [ 90%] tests/tensorflow/test_nn.py::test_gin_conv[max-g5-idtype0] PASSED [ 90%] tests/tensorflow/test_nn.py::test_gin_conv[max-g5-idtype1] PASSED [ 90%] tests/tensorflow/test_nn.py::test_gin_conv[max-g6-idtype0] PASSED [ 90%] tests/tensorflow/test_nn.py::test_gin_conv[max-g6-idtype1] PASSED [ 90%] tests/tensorflow/test_nn.py::test_gin_conv[max-g7-idtype0] PASSED [ 90%] tests/tensorflow/test_nn.py::test_gin_conv[max-g7-idtype1] PASSED [ 90%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g0-idtype0] PASSED [ 90%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g0-idtype1] PASSED [ 91%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g1-idtype0] PASSED [ 91%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g1-idtype1] PASSED [ 91%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g2-idtype0] PASSED [ 91%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g2-idtype1] PASSED [ 91%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g3-idtype0] PASSED [ 91%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g3-idtype1] PASSED [ 91%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g4-idtype0] PASSED [ 91%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g4-idtype1] PASSED [ 91%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g5-idtype0] PASSED [ 92%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g5-idtype1] PASSED [ 92%] 
tests/tensorflow/test_nn.py::test_gin_conv[sum-g6-idtype0] PASSED [ 92%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g6-idtype1] PASSED [ 92%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g7-idtype0] PASSED [ 92%] tests/tensorflow/test_nn.py::test_gin_conv[sum-g7-idtype1] PASSED [ 92%] tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g0-idtype0] PASSED [ 92%] tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g0-idtype1] PASSED [ 92%] tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g1-idtype0] PASSED [ 92%] tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g1-idtype1] PASSED [ 93%] tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g2-idtype0] PASSED [ 93%] tests/tensorflow/test_nn.py::test_gin_conv_bi[mean-g2-idtype1] PASSED [ 93%] tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g0-idtype0] PASSED [ 93%] tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g0-idtype1] PASSED [ 93%] tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g1-idtype0] PASSED [ 93%] tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g1-idtype1] PASSED [ 93%] tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g2-idtype0] PASSED [ 93%] tests/tensorflow/test_nn.py::test_gin_conv_bi[max-g2-idtype1] PASSED [ 93%] tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g0-idtype0] PASSED [ 94%] tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g0-idtype1] PASSED [ 94%] tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g1-idtype0] PASSED [ 94%] tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g1-idtype1] PASSED [ 94%] tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g2-idtype0] PASSED [ 94%] tests/tensorflow/test_nn.py::test_gin_conv_bi[sum-g2-idtype1] PASSED [ 94%] tests/tensorflow/test_nn.py::test_edge_conv[1-g0-idtype0] PASSED [ 94%] tests/tensorflow/test_nn.py::test_edge_conv[1-g0-idtype1] PASSED [ 94%] tests/tensorflow/test_nn.py::test_edge_conv[1-g1-idtype0] PASSED [ 94%] tests/tensorflow/test_nn.py::test_edge_conv[1-g1-idtype1] PASSED [ 95%] tests/tensorflow/test_nn.py::test_edge_conv[1-g2-idtype0] PASSED [ 95%] tests/tensorflow/test_nn.py::test_edge_conv[1-g2-idtype1] PASSED [ 95%] tests/tensorflow/test_nn.py::test_edge_conv[1-g3-idtype0] PASSED [ 95%] tests/tensorflow/test_nn.py::test_edge_conv[1-g3-idtype1] PASSED [ 95%] tests/tensorflow/test_nn.py::test_edge_conv[1-g4-idtype0] PASSED [ 95%] tests/tensorflow/test_nn.py::test_edge_conv[1-g4-idtype1] PASSED [ 95%] tests/tensorflow/test_nn.py::test_edge_conv[1-g5-idtype0] PASSED [ 95%] tests/tensorflow/test_nn.py::test_edge_conv[1-g5-idtype1] PASSED [ 95%] tests/tensorflow/test_nn.py::test_edge_conv[1-g6-idtype0] PASSED [ 95%] tests/tensorflow/test_nn.py::test_edge_conv[1-g6-idtype1] PASSED [ 96%] tests/tensorflow/test_nn.py::test_edge_conv[2-g0-idtype0] PASSED [ 96%] tests/tensorflow/test_nn.py::test_edge_conv[2-g0-idtype1] PASSED [ 96%] tests/tensorflow/test_nn.py::test_edge_conv[2-g1-idtype0] PASSED [ 96%] tests/tensorflow/test_nn.py::test_edge_conv[2-g1-idtype1] PASSED [ 96%] tests/tensorflow/test_nn.py::test_edge_conv[2-g2-idtype0] PASSED [ 96%] tests/tensorflow/test_nn.py::test_edge_conv[2-g2-idtype1] PASSED [ 96%] tests/tensorflow/test_nn.py::test_edge_conv[2-g3-idtype0] PASSED [ 96%] tests/tensorflow/test_nn.py::test_edge_conv[2-g3-idtype1] PASSED [ 96%] tests/tensorflow/test_nn.py::test_edge_conv[2-g4-idtype0] PASSED [ 97%] tests/tensorflow/test_nn.py::test_edge_conv[2-g4-idtype1] PASSED [ 97%] tests/tensorflow/test_nn.py::test_edge_conv[2-g5-idtype0] PASSED [ 97%] tests/tensorflow/test_nn.py::test_edge_conv[2-g5-idtype1] PASSED [ 97%] 
tests/tensorflow/test_nn.py::test_edge_conv[2-g6-idtype0] PASSED [ 97%] tests/tensorflow/test_nn.py::test_edge_conv[2-g6-idtype1] PASSED [ 97%] tests/tensorflow/test_nn.py::test_edge_conv_bi[1-g0-idtype0] PASSED [ 97%] tests/tensorflow/test_nn.py::test_edge_conv_bi[1-g0-idtype1] PASSED [ 97%] tests/tensorflow/test_nn.py::test_edge_conv_bi[1-g1-idtype0] PASSED [ 97%] tests/tensorflow/test_nn.py::test_edge_conv_bi[1-g1-idtype1] PASSED [ 98%] tests/tensorflow/test_nn.py::test_edge_conv_bi[2-g0-idtype0] PASSED [ 98%] tests/tensorflow/test_nn.py::test_edge_conv_bi[2-g0-idtype1] PASSED [ 98%] tests/tensorflow/test_nn.py::test_edge_conv_bi[2-g1-idtype0] PASSED [ 98%] tests/tensorflow/test_nn.py::test_edge_conv_bi[2-g1-idtype1] PASSED [ 98%] tests/tensorflow/test_nn.py::test_hetero_conv[sum-idtype0] PASSED [ 98%] tests/tensorflow/test_nn.py::test_hetero_conv[sum-idtype1] PASSED [ 98%] tests/tensorflow/test_nn.py::test_hetero_conv[max-idtype0] PASSED [ 98%] tests/tensorflow/test_nn.py::test_hetero_conv[max-idtype1] PASSED [ 98%] tests/tensorflow/test_nn.py::test_hetero_conv[min-idtype0] PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[min-idtype1] PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[mean-idtype0] PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[mean-idtype1] PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[stack-idtype0] PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[stack-idtype1] PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[myagg-idtype0] PASSED [ 99%] tests/tensorflow/test_nn.py::test_hetero_conv[myagg-idtype1] PASSED [ 99%] tests/tensorflow/test_nn.py::test_dense_cheb_conv[1] PASSED [ 99%] tests/tensorflow/test_nn.py::test_dense_cheb_conv[2] PASSED [100%] =============================== warnings summary =============================== ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/iterator_ops.py:546 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/iterator_ops.py:546: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working class IteratorBase(collections.Iterator, trackable.Trackable, ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py:106 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py:106: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working class DatasetV2(collections.Iterable, tracking_base.Trackable, ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/autograph/utils/testing.py:21 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/tensorflow/python/autograph/utils/testing.py:21: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses import imp ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:10: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. 
_nlv = LooseVersion(_np_version) ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:11: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p16 = _nlv < LooseVersion("1.16") ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:12: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p17 = _nlv < LooseVersion("1.17") ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:13: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p18 = _nlv < LooseVersion("1.18") ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:14: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p19 = _nlv < LooseVersion("1.19") ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/__init__.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _np_version_under1p20 = _nlv < LooseVersion("1.20") ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/setuptools/_distutils/version.py:351: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. other = LooseVersion(other) ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125 ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/pandas/compat/numpy/function.py:125: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(_np_version) >= LooseVersion("1.17.0"): ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:23 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:23: DeprecationWarning: NEAREST is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.NEAREST or Dither.NONE instead. 'nearest': pil_image.NEAREST, ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:24 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:24: DeprecationWarning: BILINEAR is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BILINEAR instead. 'bilinear': pil_image.BILINEAR, ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:25 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:25: DeprecationWarning: BICUBIC is deprecated and will be removed in Pillow 10 (2023-07-01). 
Use Resampling.BICUBIC instead. 'bicubic': pil_image.BICUBIC, ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:28 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:28: DeprecationWarning: HAMMING is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.HAMMING instead. if hasattr(pil_image, 'HAMMING'): ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:29 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:29: DeprecationWarning: HAMMING is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.HAMMING instead. _PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:30 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:30: DeprecationWarning: BOX is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BOX instead. if hasattr(pil_image, 'BOX'): ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:31 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:31: DeprecationWarning: BOX is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BOX instead. _PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:33 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:33: DeprecationWarning: LANCZOS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead. if hasattr(pil_image, 'LANCZOS'): ../../../../opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:34 /opt/conda/envs/tensorflow-ci/lib/python3.7/site-packages/keras_preprocessing/image/utils.py:34: DeprecationWarning: LANCZOS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead. _PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS python/dgl/backend/tensorflow/tensor.py:15 python/dgl/backend/tensorflow/tensor.py:15 /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/backend/tensorflow/tensor.py:15: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(tf.__version__) < LooseVersion("2.3.0"): tests/tensorflow/test_nn.py::test_graph_conv[1] tests/tensorflow/test_nn.py::test_graph_conv[2] tests/tensorflow/test_nn.py::test_simple_pool tests/tensorflow/test_nn.py::test_glob_att_pool tests/tensorflow/test_nn.py::test_rgcn[1] tests/tensorflow/test_nn.py::test_rgcn[2] tests/tensorflow/test_nn.py::test_rgcn[8] tests/tensorflow/test_nn.py::test_dense_cheb_conv[1] tests/tensorflow/test_nn.py::test_dense_cheb_conv[2] /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:72: DGLWarning: Recommend creating graphs by `dgl.graph(data)` instead of `dgl.DGLGraph(data)`. dgl_warning('Recommend creating graphs by `dgl.graph(data)`' tests/tensorflow/test_nn.py::test_rgcn[1] tests/tensorflow/test_nn.py::test_rgcn[2] tests/tensorflow/test_nn.py::test_rgcn[8] /root/jenkins/workspace/dgl_PR-4648@2/python/dgl/heterograph.py:84: DGLWarning: Keyword arguments ['readonly'] are deprecated in v0.5, and can be safely removed in all cases. 
' removed in all cases.'.format(list(deprecate_kwargs.keys()))) -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html - generated xml file: /root/jenkins/workspace/dgl_PR-4648@2/pytest_backend.xml - ============================ slowest 100 durations ============================= 1.37s call tests/tensorflow/test_nn.py::test_graph_conv[1] 1.10s call tests/tensorflow/test_nn.py::test_hetero_conv[sum-idtype0] 1.09s call tests/tensorflow/test_nn.py::test_hetero_conv[min-idtype0] 1.08s call tests/tensorflow/test_nn.py::test_hetero_conv[min-idtype1] 1.08s call tests/tensorflow/test_nn.py::test_hetero_conv[sum-idtype1] 1.06s call tests/tensorflow/test_nn.py::test_graph_conv[2] 0.80s call tests/tensorflow/test_nn.py::test_hetero_conv[mean-idtype0] 0.62s call tests/tensorflow/test_nn.py::test_hetero_conv[stack-idtype1] 0.61s call tests/tensorflow/test_nn.py::test_hetero_conv[max-idtype0] 0.61s call tests/tensorflow/test_nn.py::test_hetero_conv[mean-idtype1] 0.60s call tests/tensorflow/test_nn.py::test_hetero_conv[stack-idtype0] 0.60s call tests/tensorflow/test_nn.py::test_hetero_conv[myagg-idtype0] 0.59s call tests/tensorflow/test_nn.py::test_hetero_conv[max-idtype1] 0.59s call tests/tensorflow/test_nn.py::test_hetero_conv[myagg-idtype1] 0.54s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g0-idtype0] 0.54s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g0-idtype1] 0.53s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g1-idtype0] 0.53s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g4-idtype0] 0.53s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g1-idtype1] 0.53s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g6-idtype0] 0.53s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g3-idtype1] 0.53s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g3-idtype0] 0.53s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g4-idtype1] 0.52s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g2-idtype0] 0.52s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g5-idtype1] 0.52s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g2-idtype1] 0.52s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g6-idtype1] 0.52s call tests/tensorflow/test_nn.py::test_gat_conv[1-1-g5-idtype0] 0.47s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g2-idtype1] 0.46s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g1-idtype0] 0.46s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g1-idtype1] 0.46s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g0-idtype0] 0.46s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g3-idtype0] 0.46s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g2-idtype0] 0.45s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g0-idtype1] 0.45s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g5-idtype1] 0.45s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g3-idtype1] 0.44s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g6-idtype1] 0.44s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g6-idtype0] 0.43s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g1-idtype1] 0.42s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g5-idtype1] 0.42s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g2-idtype0] 0.42s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g0-idtype1] 0.42s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g3-idtype0] 0.41s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g1-idtype0] 0.41s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g3-idtype1] 0.41s call 
0.41s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g6-idtype1]
0.41s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g2-idtype1]
0.41s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g5-idtype0]
0.41s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g5-idtype0]
0.41s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g6-idtype1]
0.41s call tests/tensorflow/test_nn.py::test_rgcn[8]
0.41s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g6-idtype0]
0.41s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g2-idtype0]
0.40s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g3-idtype1]
0.40s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g0-idtype0]
0.40s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g0-idtype1]
0.40s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g1-idtype1]
0.40s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g0-idtype0]
0.39s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g6-idtype0]
0.39s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g4-idtype0]
0.39s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g4-idtype0]
0.39s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g1-idtype0]
0.38s call tests/tensorflow/test_nn.py::test_rgcn[2]
0.38s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g3-idtype0]
0.38s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g4-idtype1]
0.37s call tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g1-idtype0]
0.37s call tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g1-idtype1]
0.36s call tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g0-idtype0]
0.36s call tests/tensorflow/test_nn.py::test_gat_conv[4-2-g2-idtype1]
0.34s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g5-idtype1]
0.34s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g4-idtype0]
0.33s call tests/tensorflow/test_nn.py::test_gat_conv_bi[1-1-g0-idtype1]
0.32s call tests/tensorflow/test_nn.py::test_gat_conv[1-2-g4-idtype1]
0.32s call tests/tensorflow/test_nn.py::test_rgcn[1]
0.30s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g3-idtype0]
0.30s call tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g0-idtype0]
0.30s call tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g1-idtype0]
0.30s call tests/tensorflow/test_nn.py::test_gat_conv_bi[4-1-g0-idtype1]
0.29s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g1-idtype1]
0.29s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g3-idtype1]
0.28s call tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g0-idtype1]
0.27s call tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g1-idtype1]
0.27s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g1-idtype0]
0.27s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g0-idtype0]
0.27s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g5-idtype0]
0.27s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g4-idtype0]
0.27s call tests/tensorflow/test_nn.py::test_gat_conv_bi[4-2-g1-idtype1]
0.27s call tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g1-idtype0]
0.26s call tests/tensorflow/test_nn.py::test_gat_conv_bi[1-2-g0-idtype0]
0.26s call tests/tensorflow/test_nn.py::test_sgc_conv[1-g3-idtype0]
0.26s call tests/tensorflow/test_nn.py::test_sgc_conv[1-g0-idtype0]
0.26s call tests/tensorflow/test_nn.py::test_gat_conv[4-1-g4-idtype1]
0.26s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g2-idtype1]
0.25s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-both-g2-idtype1]
0.25s call tests/tensorflow/test_nn.py::test_sgc_conv[1-g1-idtype1]
0.25s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g0-idtype1]
0.25s call tests/tensorflow/test_nn.py::test_sgc_conv[1-g0-idtype1]
0.25s call tests/tensorflow/test_nn.py::test_graph_conv2[1-True-True-none-g6-idtype0]
================= 920 passed, 35 warnings in 102.04s (0:01:42) =================
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
Post stage
[Pipeline] cleanWs
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is disabled by the job configuration...
[WS-CLEANUP] done
[Pipeline] }
$ docker stop --time=1 7ac09922888ca0e0d8459513540702ad50c9384fbb02f538e426a164f5d98757
$ docker rm -f 7ac09922888ca0e0d8459513540702ad50c9384fbb02f538e426a164f5d98757
[Pipeline] // withDockerContainer
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // parallel
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // stage
[Pipeline] stage
[Pipeline] { (Declarative: Post Actions)
[Pipeline] script
[Pipeline] {
[Pipeline] node
Running on dglci-post-linux in /home/ubuntu/dgl_ci_post_linux/workspace/dgl_PR-4648
[Pipeline] {
[Pipeline] isUnix
[Pipeline] withEnv
[Pipeline] {
[Pipeline] sh
+ docker inspect -f . dgllib/dgl-ci-awscli:v220418
.
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] withDockerContainer
dglci-post-linux does not seem to be running inside a container
$ docker run -t -d -u 0:0 --pull always --entrypoint= -w /home/ubuntu/dgl_ci_post_linux/workspace/dgl_PR-4648 -v /home/ubuntu/dgl_ci_post_linux/workspace/dgl_PR-4648:/home/ubuntu/dgl_ci_post_linux/workspace/dgl_PR-4648:rw,z -v /home/ubuntu/dgl_ci_post_linux/workspace/dgl_PR-4648@tmp:/home/ubuntu/dgl_ci_post_linux/workspace/dgl_PR-4648@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** dgllib/dgl-ci-awscli:v220418 cat
$ docker top d465420ececd46eddeed628742417712df14465db5d2d5b2af636540c58e3707 -eo pid,comm
[Pipeline] {
[Pipeline] sh
+ rm -rf ci_tmp
[Pipeline] dir
Running in /home/ubuntu/dgl_ci_post_linux/workspace/dgl_PR-4648/ci_tmp
[Pipeline] {
[Pipeline] sh
+ curl -k -o cireport.log https://dgl-jenkins-eksvpc-2136217999.us-west-2.elb.amazonaws.com/job/dgl/job/PR-4648/2/consoleText
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed